diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b71a9e02db..180172c6012 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v0.17.0 (2019-11-20) +=== + +Services +--- +* Synced the V2 SDK with latest AWS service API definitions. + +SDK Enhancements +--- +* SDK minimum version requirement has been updated to Go 1.12 ([#432](https://github.com/aws/aws-sdk-go-v2/pull/432)) + Release v0.16.0 (2019-11-12) === diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 080c6c3a961..cb7c688ed3e 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -6,7 +6,6 @@ SDK Features SDK Enhancements --- -* SDK minimum version requirement has been updated to Go 1.12 ([#432](https://github.com/aws/aws-sdk-go-v2/pull/432)) SDK Bugs --- diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index c1536fdc42e..5d1234684d6 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -1116,6 +1116,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -1706,11 +1722,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2817,6 +2838,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3513,6 +3538,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3522,26 +3551,11 @@ var awsPartition = partition{ "shield": service{ IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "shield.ca-central-1.amazonaws.com", + SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, }, }, "sms": service{ @@ -4243,6 +4257,12 @@ var awscnPartition = partition{ Unresolveable: boxedTrue, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ 
@@ -4630,6 +4650,12 @@ var awscnPartition = partition{ }, }, }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, }, } diff --git a/aws/version.go b/aws/version.go index 0935f02b2c9..bb797c5604b 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "0.16.0" +const SDKVersion = "0.17.0" diff --git a/internal/awstesting/cmd/op_crawler/create_service.go b/internal/awstesting/cmd/op_crawler/create_service.go index ecc177f443b..fb984b7064f 100644 --- a/internal/awstesting/cmd/op_crawler/create_service.go +++ b/internal/awstesting/cmd/op_crawler/create_service.go @@ -55,6 +55,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/costandusagereportservice" "github.com/aws/aws-sdk-go-v2/service/costexplorer" "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" "github.com/aws/aws-sdk-go-v2/service/datapipeline" "github.com/aws/aws-sdk-go-v2/service/datasync" "github.com/aws/aws-sdk-go-v2/service/dax" @@ -133,6 +134,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/mediastoredata" "github.com/aws/aws-sdk-go-v2/service/mediatailor" "github.com/aws/aws-sdk-go-v2/service/migrationhub" + "github.com/aws/aws-sdk-go-v2/service/migrationhubconfig" "github.com/aws/aws-sdk-go-v2/service/mobile" "github.com/aws/aws-sdk-go-v2/service/mobileanalytics" "github.com/aws/aws-sdk-go-v2/service/mq" @@ -176,6 +178,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/servicediscovery" "github.com/aws/aws-sdk-go-v2/service/servicequotas" "github.com/aws/aws-sdk-go-v2/service/ses" + "github.com/aws/aws-sdk-go-v2/service/sesv2" "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/aws/aws-sdk-go-v2/service/shield" "github.com/aws/aws-sdk-go-v2/service/signer" @@ -265,6 +268,7 @@ func createServices(cfg aws.Config) []service { {name: "costandusagereportservice", value: reflect.ValueOf(costandusagereportservice.New(cfg))}, {name: "costexplorer", value: reflect.ValueOf(costexplorer.New(cfg))}, {name: "databasemigrationservice", value: reflect.ValueOf(databasemigrationservice.New(cfg))}, + {name: "dataexchange", value: reflect.ValueOf(dataexchange.New(cfg))}, {name: "datapipeline", value: reflect.ValueOf(datapipeline.New(cfg))}, {name: "datasync", value: reflect.ValueOf(datasync.New(cfg))}, {name: "dax", value: reflect.ValueOf(dax.New(cfg))}, @@ -343,6 +347,7 @@ func createServices(cfg aws.Config) []service { {name: "mediastoredata", value: reflect.ValueOf(mediastoredata.New(cfg))}, {name: "mediatailor", value: reflect.ValueOf(mediatailor.New(cfg))}, {name: "migrationhub", value: reflect.ValueOf(migrationhub.New(cfg))}, + {name: "migrationhubconfig", value: reflect.ValueOf(migrationhubconfig.New(cfg))}, {name: "mobile", value: reflect.ValueOf(mobile.New(cfg))}, {name: "mobileanalytics", value: reflect.ValueOf(mobileanalytics.New(cfg))}, {name: "mq", value: reflect.ValueOf(mq.New(cfg))}, @@ -386,6 +391,7 @@ func createServices(cfg aws.Config) []service { {name: "servicediscovery", value: reflect.ValueOf(servicediscovery.New(cfg))}, {name: "servicequotas", value: reflect.ValueOf(servicequotas.New(cfg))}, {name: "ses", value: reflect.ValueOf(ses.New(cfg))}, + {name: "sesv2", value: reflect.ValueOf(sesv2.New(cfg))}, {name: "sfn", value: reflect.ValueOf(sfn.New(cfg))}, {name: "shield", value: reflect.ValueOf(shield.New(cfg))}, {name: "signer", value: reflect.ValueOf(signer.New(cfg))}, diff --git 
a/models/apis/AWSMigrationHub/2017-05-31/api-2.json b/models/apis/AWSMigrationHub/2017-05-31/api-2.json index 4c1d52efc85..3eb3a228df7 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/api-2.json +++ b/models/apis/AWSMigrationHub/2017-05-31/api-2.json @@ -6,6 +6,7 @@ "jsonVersion":"1.1", "protocol":"json", "serviceFullName":"AWS Migration Hub", + "serviceId":"Migration Hub", "signatureVersion":"v4", "targetPrefix":"AWSMigrationHub", "uid":"AWSMigrationHub-2017-05-31" @@ -26,7 +27,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "AssociateDiscoveredResource":{ @@ -45,7 +47,8 @@ {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "CreateProgressUpdateStream":{ @@ -62,7 +65,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, - {"shape":"InvalidInputException"} + {"shape":"InvalidInputException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteProgressUpdateStream":{ @@ -80,7 +84,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeApplicationState":{ @@ -97,7 +102,8 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeMigrationTask":{ @@ -113,7 +119,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateCreatedArtifact":{ @@ -131,7 +138,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateDiscoveredResource":{ @@ -149,7 +157,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ImportMigrationTask":{ @@ -167,7 +176,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListCreatedArtifacts":{ @@ -183,7 +193,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListDiscoveredResources":{ @@ -199,7 +210,8 @@ {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListMigrationTasks":{ @@ -216,7 +228,8 @@ 
{"shape":"ServiceUnavailableException"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListProgressUpdateStreams":{ @@ -231,7 +244,8 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerError"}, {"shape":"ServiceUnavailableException"}, - {"shape":"InvalidInputException"} + {"shape":"InvalidInputException"}, + {"shape":"HomeRegionNotSetException"} ] }, "NotifyApplicationState":{ @@ -250,7 +264,8 @@ {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, {"shape":"PolicyErrorException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "NotifyMigrationTaskState":{ @@ -268,7 +283,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "PutResourceAttributes":{ @@ -286,7 +302,8 @@ {"shape":"DryRunOperation"}, {"shape":"UnauthorizedOperation"}, {"shape":"InvalidInputException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] } }, @@ -497,6 +514,13 @@ "exception":true }, "ErrorMessage":{"type":"string"}, + "HomeRegionNotSetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "ImportMigrationTaskRequest":{ "type":"structure", "required":[ @@ -666,6 +690,7 @@ "members":{ "ApplicationId":{"shape":"ApplicationId"}, "Status":{"shape":"ApplicationStatus"}, + "UpdateDateTime":{"shape":"UpdateDateTime"}, "DryRun":{"shape":"DryRun"} } }, diff --git a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json index 5cb5869d564..a085c8ea343 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/docs-2.json +++ b/models/apis/AWSMigrationHub/2017-05-31/docs-2.json @@ -1,15 +1,15 @@ { "version": "2.0", - "service": "

The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub.

", + "service": "

The AWS Migration Hub API methods help to obtain server and application migration status and integrate your resource-specific migration tool by providing a programmatic interface to Migration Hub.

Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException error will be returned. Also, you must make the API calls while in your home region.
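The home-region requirement above corresponds to the new HomeRegionNotSetException shape added to api-2.json in this change. Below is a minimal, hedged sketch of detecting that error with the pre-1.0 aws-sdk-go-v2 request/Send pattern; the client constructor matches the op_crawler registration in this diff, while the awserr comparison, the error-code string, and the response handling are assumptions to verify against the generated migrationhub package for v0.17.0.

```go
// Hedged sketch (not part of this diff): calling Migration Hub and checking
// for the HomeRegionNotSetException added in this change.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhub"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := migrationhub.New(cfg)

	req := svc.ListProgressUpdateStreamsRequest(&migrationhub.ListProgressUpdateStreamsInput{})
	resp, err := req.Send(context.TODO())
	if err != nil {
		// The error code string matches the shape name added to api-2.json above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "HomeRegionNotSetException" {
			log.Fatal("set your Migration Hub home region before calling these APIs")
		}
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```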

", "operations": { "AssociateCreatedArtifact": "

Associates a created artifact of an AWS cloud resource, the target receiving the migration, with the migration task performed by a migration tool. This API has the following traits:

", - "AssociateDiscoveredResource": "

Associates a discovered resource ID from Application Discovery Service (ADS) with a migration task.

", + "AssociateDiscoveredResource": "

Associates a discovered resource ID from Application Discovery Service with a migration task.

", "CreateProgressUpdateStream": "

Creates a progress update stream which is an AWS resource used for access control as well as a namespace for migration task names that is implicitly linked to your AWS account. It must uniquely identify the migration tool as it is used for all updates made by the tool; however, it does not need to be unique for each AWS account because it is scoped to the AWS account.

", - "DeleteProgressUpdateStream": "

Deletes a progress update stream, including all of its tasks, which was previously created as an AWS resource used for access control. This API has the following traits:

", + "DeleteProgressUpdateStream": "

Deletes a progress update stream, including all of its tasks, which was previously created as an AWS resource used for access control. This API has the following traits:

", "DescribeApplicationState": "

Gets the migration status of an application.

", "DescribeMigrationTask": "

Retrieves a list of all attributes associated with a specific migration task.

", "DisassociateCreatedArtifact": "

Disassociates a created artifact of an AWS resource with a migration task performed by a migration tool that was previously associated. This API has the following traits:

", - "DisassociateDiscoveredResource": "

Disassociate an Application Discovery Service (ADS) discovered resource from a migration task.

", + "DisassociateDiscoveredResource": "

Disassociate an Application Discovery Service discovered resource from a migration task.

", "ImportMigrationTask": "

Registers a new migration task which represents a server, database, etc., being migrated to AWS by a migration tool.

This API is a prerequisite to calling the NotifyMigrationTaskState API as the migration tool must first register the migration task with Migration Hub.

", "ListCreatedArtifacts": "

Lists the created artifacts attached to a given migration task in an update stream. This API has the following traits:

", "ListDiscoveredResources": "

Lists discovered resources associated with the given MigrationTask.
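This change also adds paginator definitions for the Migration Hub list APIs (see the paginators-1.json hunk later in this diff), keyed on NextToken, MaxResults, and DiscoveredResourceList. The following is a hand-rolled paging sketch under those assumptions; it reuses the config/client setup and imports from the earlier sketch (plus fmt and the aws package for pointer helpers), and the stream and task names are placeholders.

```go
// Hedged sketch: manual NextToken paging over ListDiscoveredResources,
// following the token/result keys declared in the new paginators-1.json.
func listAllDiscoveredResources(ctx context.Context, cfg aws.Config) error {
	svc := migrationhub.New(cfg)
	input := &migrationhub.ListDiscoveredResourcesInput{
		ProgressUpdateStream: aws.String("my-stream"), // placeholder
		MigrationTaskName:    aws.String("my-task"),   // placeholder
		MaxResults:           aws.Int64(10),
	}
	for {
		resp, err := svc.ListDiscoveredResourcesRequest(input).Send(ctx)
		if err != nil {
			return err
		}
		for _, r := range resp.DiscoveredResourceList {
			fmt.Println(*r.ConfigurationId) // configurationId from Application Discovery Service
		}
		if resp.NextToken == nil {
			break
		}
		input.NextToken = resp.NextToken
	}
	return nil
}
```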

", @@ -17,7 +17,7 @@ "ListProgressUpdateStreams": "

Lists progress update streams associated with the user account making this call.

", "NotifyApplicationState": "

Sets the migration state of an application. For a given application identified by the value passed to ApplicationId, its status is set or updated by passing one of three values to Status: NOT_STARTED | IN_PROGRESS | COMPLETED.
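A hedged sketch of this call using the UpdateDateTime member that this change adds to NotifyApplicationStateRequest. It reuses the earlier config/client setup plus the time package; the enum constant name follows the v2 code-generation convention and is an assumption, and the application ID is a placeholder.

```go
// Hedged sketch: report IN_PROGRESS with the new UpdateDateTime member.
func markInProgress(ctx context.Context, cfg aws.Config) error {
	svc := migrationhub.New(cfg)
	now := time.Now()
	req := svc.NotifyApplicationStateRequest(&migrationhub.NotifyApplicationStateInput{
		ApplicationId:  aws.String("d-application-0123456789abc"), // placeholder configurationId
		Status:         migrationhub.ApplicationStatusInProgress,  // "IN_PROGRESS"; constant name assumed
		UpdateDateTime: &now,
	})
	_, err := req.Send(ctx)
	return err
}
```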

", "NotifyMigrationTaskState": "

Notifies Migration Hub of the current status, progress, or other detail regarding a migration task. This API has the following traits:

", - "PutResourceAttributes": "

Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service (ADS)'s repository. This association occurs asynchronously after PutResourceAttributes returns.

Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources.

" + "PutResourceAttributes": "

Provides identifying details of the resource being migrated so that it can be associated in the Application Discovery Service repository. This association occurs asynchronously after PutResourceAttributes returns.

Because this is an asynchronous call, it will always return 200, whether an association occurs or not. To confirm if an association was found based on the provided details, call ListDiscoveredResources.

" }, "shapes": { "AccessDeniedException": { @@ -28,8 +28,8 @@ "ApplicationId": { "base": null, "refs": { - "DescribeApplicationStateRequest$ApplicationId": "

The configurationId in ADS that uniquely identifies the grouped application.

", - "NotifyApplicationStateRequest$ApplicationId": "

The configurationId in ADS that uniquely identifies the grouped application.

" + "DescribeApplicationStateRequest$ApplicationId": "

The configurationId in Application Discovery Service that uniquely identifies the grouped application.

", + "NotifyApplicationStateRequest$ApplicationId": "

The configurationId in Application Discovery Service that uniquely identifies the grouped application.

" } }, "ApplicationStatus": { @@ -62,8 +62,8 @@ "ConfigurationId": { "base": null, "refs": { - "DisassociateDiscoveredResourceRequest$ConfigurationId": "

ConfigurationId of the ADS resource to be disassociated.

", - "DiscoveredResource$ConfigurationId": "

The configurationId in ADS that uniquely identifies the on-premise resource.

" + "DisassociateDiscoveredResourceRequest$ConfigurationId": "

ConfigurationId of the Application Discovery Service resource to be disassociated.

", + "DiscoveredResource$ConfigurationId": "

The configurationId in Application Discovery Service that uniquely identifies the on-premises resource.


" } }, "CreateProgressUpdateStreamRequest": { @@ -196,6 +196,7 @@ "refs": { "AccessDeniedException$Message": null, "DryRunOperation$Message": null, + "HomeRegionNotSetException$Message": null, "InternalServerError$Message": null, "InvalidInputException$Message": null, "PolicyErrorException$Message": null, @@ -204,6 +205,11 @@ "UnauthorizedOperation$Message": null } }, + "HomeRegionNotSetException": { + "base": "

The home region is not set. Set the home region to continue.

", + "refs": { + } + }, "ImportMigrationTaskRequest": { "base": null, "refs": { @@ -215,7 +221,7 @@ } }, "InternalServerError": { - "base": "

Exception raised when there is an internal, configuration, or dependency error encountered.

", + "base": "

Exception raised when an internal, configuration, or dependency error is encountered.

", "refs": { } }, @@ -227,7 +233,7 @@ "LatestResourceAttributeList": { "base": null, "refs": { - "MigrationTask$ResourceAttributeList": "

" + "MigrationTask$ResourceAttributeList": "

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository.

" } }, "ListCreatedArtifactsRequest": { @@ -298,18 +304,18 @@ "MigrationTaskName": { "base": null, "refs": { - "AssociateCreatedArtifactRequest$MigrationTaskName": "

Unique identifier that references the migration task.

", - "AssociateDiscoveredResourceRequest$MigrationTaskName": "

The identifier given to the MigrationTask.

", - "DescribeMigrationTaskRequest$MigrationTaskName": "

The identifier given to the MigrationTask.

", - "DisassociateCreatedArtifactRequest$MigrationTaskName": "

Unique identifier that references the migration task to be disassociated with the artifact.

", - "DisassociateDiscoveredResourceRequest$MigrationTaskName": "

The identifier given to the MigrationTask.

", - "ImportMigrationTaskRequest$MigrationTaskName": "

Unique identifier that references the migration task.

", - "ListCreatedArtifactsRequest$MigrationTaskName": "

Unique identifier that references the migration task.

", - "ListDiscoveredResourcesRequest$MigrationTaskName": "

The name of the MigrationTask.

", - "MigrationTask$MigrationTaskName": "

Unique identifier that references the migration task.

", - "MigrationTaskSummary$MigrationTaskName": "

Unique identifier that references the migration task.

", - "NotifyMigrationTaskStateRequest$MigrationTaskName": "

Unique identifier that references the migration task.

", - "PutResourceAttributesRequest$MigrationTaskName": "

Unique identifier that references the migration task.

" + "AssociateCreatedArtifactRequest$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "AssociateDiscoveredResourceRequest$MigrationTaskName": "

The identifier given to the MigrationTask. Do not store personal data in this field.

", + "DescribeMigrationTaskRequest$MigrationTaskName": "

The identifier given to the MigrationTask. Do not store personal data in this field.

", + "DisassociateCreatedArtifactRequest$MigrationTaskName": "

Unique identifier that references the migration task to be disassociated from the artifact. Do not store personal data in this field.

", + "DisassociateDiscoveredResourceRequest$MigrationTaskName": "

The identifier given to the MigrationTask. Do not store personal data in this field.

", + "ImportMigrationTaskRequest$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "ListCreatedArtifactsRequest$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "ListDiscoveredResourcesRequest$MigrationTaskName": "

The name of the MigrationTask. Do not store personal data in this field.

", + "MigrationTask$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "MigrationTaskSummary$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "NotifyMigrationTaskStateRequest$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

", + "PutResourceAttributesRequest$MigrationTaskName": "

Unique identifier that references the migration task. Do not store personal data in this field.

" } }, "MigrationTaskSummary": { @@ -351,14 +357,14 @@ } }, "PolicyErrorException": { - "base": "

Exception raised when there are problems accessing ADS (Application Discovery Service); most likely due to a misconfigured policy or the migrationhub-discovery role is missing or not configured correctly.

", + "base": "

Exception raised when there are problems accessing Application Discovery Service; most likely due to a misconfigured policy, or because the migrationhub-discovery role is missing or not configured correctly.

", "refs": { } }, "ProgressPercent": { "base": null, "refs": { - "MigrationTaskSummary$ProgressPercent": "

", + "MigrationTaskSummary$ProgressPercent": "

Indication of the percentage completion of the task.

", "Task$ProgressPercent": "

Indication of the percentage completion of the task.

" } }, @@ -367,18 +373,18 @@ "refs": { "AssociateCreatedArtifactRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "AssociateDiscoveredResourceRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", - "CreateProgressUpdateStreamRequest$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream.

", - "DeleteProgressUpdateStreamRequest$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream.

", + "CreateProgressUpdateStreamRequest$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream. Do not store personal data in this field.

", + "DeleteProgressUpdateStreamRequest$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream. Do not store personal data in this field.

", "DescribeMigrationTaskRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "DisassociateCreatedArtifactRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "DisassociateDiscoveredResourceRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", - "ImportMigrationTaskRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", + "ImportMigrationTaskRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "ListCreatedArtifactsRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "ListDiscoveredResourcesRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", "MigrationTask$ProgressUpdateStream": "

A name that identifies the vendor of the migration tool being used.

", "MigrationTaskSummary$ProgressUpdateStream": "

An AWS resource used for access control. It should uniquely identify the migration tool as it is used for all updates made by the tool.

", "NotifyMigrationTaskStateRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

", - "ProgressUpdateStreamSummary$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream.

", + "ProgressUpdateStreamSummary$ProgressUpdateStreamName": "

The name of the ProgressUpdateStream. Do not store personal data in this field.

", "PutResourceAttributesRequest$ProgressUpdateStream": "

The name of the ProgressUpdateStream.

" } }, @@ -414,7 +420,7 @@ "ResourceAttributeList": { "base": null, "refs": { - "PutResourceAttributesRequest$ResourceAttributeList": "

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service (ADS)'s repository.

Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters.

" + "PutResourceAttributesRequest$ResourceAttributeList": "

Information about the resource that is being migrated. This data will be used to map the task to a resource in the Application Discovery Service repository.

Takes the object array of ResourceAttribute where the Type field is reserved for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN | VM_MANAGER_ID | VM_MANAGED_OBJECT_REFERENCE | VM_NAME | VM_PATH | BIOS_ID | MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 characters.
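A hedged sketch of building the ResourceAttributeList described above. The reserved Type values come from the documentation; the enum constant names and the slice element type (value vs. pointer) follow codegen conventions and should be verified against the generated migrationhub package. It reuses the earlier config/client setup and imports.

```go
// Hedged sketch: PutResourceAttributes with two reserved attribute types.
func putAttributes(ctx context.Context, cfg aws.Config) error {
	svc := migrationhub.New(cfg)
	req := svc.PutResourceAttributesRequest(&migrationhub.PutResourceAttributesInput{
		ProgressUpdateStream: aws.String("my-stream"), // placeholder
		MigrationTaskName:    aws.String("my-task"),   // placeholder
		ResourceAttributeList: []migrationhub.ResourceAttribute{
			{Type: migrationhub.ResourceAttributeTypeIpv4Address, Value: aws.String("10.0.0.21")},
			{Type: migrationhub.ResourceAttributeTypeFqdn, Value: aws.String("db1.example.com")},
		},
	})
	_, err := req.Send(ctx)
	return err
}
```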

" } }, "ResourceAttributeType": { @@ -436,7 +442,7 @@ } }, "ResourceNotFoundException": { - "base": "

Exception raised when the request references a resource (ADS configuration, update stream, migration task, etc.) that does not exist in ADS (Application Discovery Service) or in Migration Hub's repository.

", + "base": "

Exception raised when the request references a resource (Application Discovery Service configuration, update stream, migration task, etc.) that does not exist in Application Discovery Service or in Migration Hub's repository.

", "refs": { } }, @@ -490,6 +496,7 @@ "DescribeApplicationStateResult$LastUpdatedTime": "

The timestamp when the application status was last updated.

", "MigrationTask$UpdateDateTime": "

The timestamp when the task was gathered.

", "MigrationTaskSummary$UpdateDateTime": "

The timestamp when the task was gathered.

", + "NotifyApplicationStateRequest$UpdateDateTime": "

The timestamp when the application state changed.

", "NotifyMigrationTaskStateRequest$UpdateDateTime": "

The timestamp when the task was gathered.

" } } diff --git a/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json b/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json index 5677bd8e4a2..2d785c8af28 100644 --- a/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json +++ b/models/apis/AWSMigrationHub/2017-05-31/paginators-1.json @@ -1,4 +1,28 @@ { "pagination": { + "ListCreatedArtifacts": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "CreatedArtifactList" + }, + "ListDiscoveredResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "DiscoveredResourceList" + }, + "ListMigrationTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "MigrationTaskSummaryList" + }, + "ListProgressUpdateStreams": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ProgressUpdateStreamSummaryList" + } } -} +} \ No newline at end of file diff --git a/models/apis/autoscaling/2011-01-01/api-2.json b/models/apis/autoscaling/2011-01-01/api-2.json index bb8a88957bd..512f6095f2a 100644 --- a/models/apis/autoscaling/2011-01-01/api-2.json +++ b/models/apis/autoscaling/2011-01-01/api-2.json @@ -939,7 +939,8 @@ "Tags":{"shape":"TagDescriptionList"}, "TerminationPolicies":{"shape":"TerminationPolicies"}, "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "AutoScalingGroupDesiredCapacity":{"type":"integer"}, @@ -981,13 +982,15 @@ ], "members":{ "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, "AutoScalingGroupName":{"shape":"XmlStringMaxLen255"}, "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, "LifecycleState":{"shape":"XmlStringMaxLen32"}, "HealthStatus":{"shape":"XmlStringMaxLen32"}, "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, "LaunchTemplate":{"shape":"LaunchTemplateSpecification"}, - "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + "ProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "AutoScalingInstances":{ @@ -1128,7 +1131,8 @@ "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, "LifecycleHookSpecificationList":{"shape":"LifecycleHookSpecifications"}, "Tags":{"shape":"Tags"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "CreateLaunchConfigurationType":{ @@ -1564,12 +1568,14 @@ ], "members":{ "InstanceId":{"shape":"XmlStringMaxLen19"}, + "InstanceType":{"shape":"XmlStringMaxLen255"}, "AvailabilityZone":{"shape":"XmlStringMaxLen255"}, "LifecycleState":{"shape":"LifecycleState"}, "HealthStatus":{"shape":"XmlStringMaxLen32"}, "LaunchConfigurationName":{"shape":"XmlStringMaxLen255"}, "LaunchTemplate":{"shape":"LaunchTemplateSpecification"}, - "ProtectedFromScaleIn":{"shape":"InstanceProtected"} + "ProtectedFromScaleIn":{"shape":"InstanceProtected"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "InstanceIds":{ @@ -1687,7 +1693,8 @@ "LaunchTemplateOverrides":{ "type":"structure", "members":{ - "InstanceType":{"shape":"XmlStringMaxLen255"} + "InstanceType":{"shape":"XmlStringMaxLen255"}, + "WeightedCapacity":{"shape":"XmlStringMaxLen32"} } }, "LaunchTemplateSpecification":{ @@ 
-1804,6 +1811,7 @@ "type":"list", "member":{"shape":"LoadBalancerTargetGroupState"} }, + "MaxInstanceLifetime":{"type":"integer"}, "MaxNumberOfAutoScalingGroups":{"type":"integer"}, "MaxNumberOfLaunchConfigurations":{"type":"integer"}, "MaxRecords":{"type":"integer"}, @@ -2387,7 +2395,8 @@ "VPCZoneIdentifier":{"shape":"XmlStringMaxLen2047"}, "TerminationPolicies":{"shape":"TerminationPolicies"}, "NewInstancesProtectedFromScaleIn":{"shape":"InstanceProtected"}, - "ServiceLinkedRoleARN":{"shape":"ResourceName"} + "ServiceLinkedRoleARN":{"shape":"ResourceName"}, + "MaxInstanceLifetime":{"shape":"MaxInstanceLifetime"} } }, "Values":{ diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index 99b7bf6b277..cf99a3c32be 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -3,7 +3,7 @@ "service": "Amazon EC2 Auto Scaling

Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined scaling policies, scheduled actions, and health checks. Use this service with AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing.

For more information, including information about granting IAM users required permissions for Amazon EC2 Auto Scaling actions, see the Amazon EC2 Auto Scaling User Guide.

", "operations": { "AttachInstances": "

Attaches one or more EC2 instances to the specified Auto Scaling group.

When you attach instances, Amazon EC2 Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.

If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.

For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", - "AttachLoadBalancerTargetGroups": "

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", + "AttachLoadBalancerTargetGroups": "

Attaches one or more target groups to the specified Auto Scaling group.

To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.

With Application Load Balancers and Network Load Balancers, instances are registered as targets with a target group. With Classic Load Balancers, instances are registered with the load balancer. For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", "AttachLoadBalancers": "

Attaches one or more Classic Load Balancers to the specified Auto Scaling group.

To attach an Application Load Balancer or a Network Load Balancer instead, see AttachLoadBalancerTargetGroups.

To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.

For more information, see Attaching a Load Balancer to Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", "BatchDeleteScheduledAction": "

Deletes one or more scheduled actions for the specified Auto Scaling group.

", "BatchPutScheduledUpdateGroupAction": "

Creates or updates one or more scheduled scaling actions for an Auto Scaling group. If you leave a parameter unspecified when updating a scheduled scaling action, the corresponding value remains unchanged.

", @@ -33,7 +33,7 @@ "DescribePolicies": "

Describes the policies for the specified Auto Scaling group.

", "DescribeScalingActivities": "

Describes one or more scaling activities for the specified Auto Scaling group.

", "DescribeScalingProcessTypes": "

Describes the scaling process types for use with ResumeProcesses and SuspendProcesses.

", - "DescribeScheduledActions": "

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.

", + "DescribeScheduledActions": "

Describes the actions scheduled for your Auto Scaling group that haven't run or that have not reached their end time. To describe the actions that have already run, use DescribeScalingActivities.

", "DescribeTags": "

Describes the specified tags.

You can use filters to limit the results. For example, you can query for the tags for a specific Auto Scaling group. You can specify multiple values for a filter. A tag must match at least one of the specified values for it to be included in the results.

You can also specify multiple filters. The result includes information for a particular tag only if it matches all the filters. If there's no match, no special message is returned.

", "DescribeTerminationPolicyTypes": "

Describes the termination policies supported by Amazon EC2 Auto Scaling.

For more information, see Controlling Which Auto Scaling Instances Terminate During Scale In in the Amazon EC2 Auto Scaling User Guide.

", "DetachInstances": "

Removes one or more instances from the specified Auto Scaling group.

After the instances are detached, you can manage them independent of the Auto Scaling group.

If you do not specify the option to decrement the desired capacity, Amazon EC2 Auto Scaling launches instances to replace the ones that are detached.

If there is a Classic Load Balancer attached to the Auto Scaling group, the instances are deregistered from the load balancer. If there are target groups attached to the Auto Scaling group, the instances are deregistered from the target groups.

For more information, see Detach EC2 Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", @@ -41,9 +41,9 @@ "DetachLoadBalancers": "

Detaches one or more Classic Load Balancers from the specified Auto Scaling group.

This operation detaches only Classic Load Balancers. If you have Application Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups instead.

When you detach a load balancer, it enters the Removing state while deregistering the instances in the group. When all instances are deregistered, then you can no longer describe the load balancer using DescribeLoadBalancers. The instances remain running.

", "DisableMetricsCollection": "

Disables group metrics for the specified Auto Scaling group.

", "EnableMetricsCollection": "

Enables group metrics for the specified Auto Scaling group. For more information, see Monitoring Your Auto Scaling Groups and Instances in the Amazon EC2 Auto Scaling User Guide.

", - "EnterStandby": "

Moves the specified instances into the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", + "EnterStandby": "

Moves the specified instances into the standby state.

If you choose to decrement the desired capacity of the Auto Scaling group, the instances can enter standby as long as the desired capacity of the Auto Scaling group after the instances are placed into standby is equal to or greater than the minimum capacity of the group.

If you choose not to decrement the desired capacity of the Auto Scaling group, the Auto Scaling group launches new instances to replace the instances on standby.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.
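A hedged sketch of the standby behavior described above, decrementing the desired capacity so no replacement instances are launched. It uses the same pre-1.0 request/Send pattern with github.com/aws/aws-sdk-go-v2/service/autoscaling; the group and instance identifiers are placeholders, and the InstanceIds element type ([]string vs. []*string) should be checked against the generated package.

```go
// Hedged sketch: move an instance to standby and decrement desired capacity.
func moveToStandby(ctx context.Context, cfg aws.Config) error {
	svc := autoscaling.New(cfg)
	req := svc.EnterStandbyRequest(&autoscaling.EnterStandbyInput{
		AutoScalingGroupName:           aws.String("my-asg"),            // placeholder
		InstanceIds:                    []string{"i-0123456789abcdef0"}, // placeholder
		ShouldDecrementDesiredCapacity: aws.Bool(true),
	})
	_, err := req.Send(ctx)
	return err
}
```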

", "ExecutePolicy": "

Executes the specified policy.

", - "ExitStandby": "

Moves the specified instances out of the standby state.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", + "ExitStandby": "

Moves the specified instances out of the standby state.

After you put the instances back in service, the desired capacity is incremented.

For more information, see Temporarily Removing Instances from Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

", "PutLifecycleHook": "

Creates or updates a lifecycle hook for the specified Auto Scaling group.

A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an instance when the instance launches (before it is put into service) or as the instance terminates (before it is fully terminated).

This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:

  1. (Optional) Create a Lambda function and a rule that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.

  2. (Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.

  3. Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.

  4. If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using RecordLifecycleActionHeartbeat.

  5. If you finish before the timeout period ends, complete the lifecycle action using CompleteLifecycleAction.

For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks in the Amazon EC2 Auto Scaling User Guide.

If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.

You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook.

", "PutNotificationConfiguration": "

Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.

This configuration overwrites any existing configuration.

For more information, see Getting Amazon SNS Notifications When Your Auto Scaling Group Scales in the Amazon EC2 Auto Scaling User Guide.

", "PutScalingPolicy": "

Creates or updates a scaling policy for an Auto Scaling group. To update an existing scaling policy, use the existing policy name and set the parameters to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.

For more information about using scaling policies to scale your Auto Scaling group automatically, see Dynamic Scaling in the Amazon EC2 Auto Scaling User Guide.

", @@ -55,7 +55,7 @@ "SetInstanceProtection": "

Updates the instance protection settings of the specified instances.

For more information about preventing instances that are part of an Auto Scaling group from terminating on scale in, see Instance Protection in the Amazon EC2 Auto Scaling User Guide.

", "SuspendProcesses": "

Suspends the specified automatic scaling processes, or all processes, for the specified Auto Scaling group.

If you suspend either the Launch or Terminate process types, it can prevent other process types from functioning properly.

To resume processes that have been suspended, use ResumeProcesses.

For more information, see Suspending and Resuming Scaling Processes in the Amazon EC2 Auto Scaling User Guide.

", "TerminateInstanceInAutoScalingGroup": "

Terminates the specified instance and optionally adjusts the desired group size.

This call simply makes a termination request. The instance is not terminated immediately.

", - "UpdateAutoScalingGroup": "

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.

" + "UpdateAutoScalingGroup": "

Updates the configuration for the specified Auto Scaling group.

To update an Auto Scaling group, specify the name of the group and the parameter that you want to change. Any parameters that you don't specify are not changed by this update request. The new settings take effect on any scaling activities after this call returns.

If you associate a new launch configuration or template with an Auto Scaling group, all new instances will get the updated configuration. Existing instances continue to run with the configuration that they were originally launched with. When you update a group to specify a mixed instances policy instead of a launch configuration or template, existing instances may be replaced to match the new purchasing options that you specified in the policy. For example, if the group currently has 100% On-Demand capacity and the policy specifies 50% Spot capacity, this means that half of your instances will be gradually terminated and relaunched as Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones, so that updating your group does not compromise the performance or availability of your application.

Note the following about changing DesiredCapacity, MaxSize, or MinSize:

To see which parameters have been set, use DescribeAutoScalingGroups. You can also view the scaling policies for an Auto Scaling group using DescribePolicies. If the group has scaling policies, you can update them using PutScalingPolicy.
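This change also adds the MaxInstanceLifetime member to the create and update group shapes in api-2.json. Below is a hedged sketch of setting it through UpdateAutoScalingGroup; the field name comes from the model in this diff, the group name is a placeholder, and the exact pointer types should be verified against the generated autoscaling package for v0.17.0.

```go
// Hedged sketch: cap instance age at 7 days (604800 seconds) using the
// MaxInstanceLifetime member added in this change.
func capLifetime(ctx context.Context, cfg aws.Config) error {
	svc := autoscaling.New(cfg)
	req := svc.UpdateAutoScalingGroupRequest(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"),     // placeholder
		MaxInstanceLifetime:  aws.Int64(7 * 24 * 3600), // MaxInstanceLifetime is an integer shape, in seconds
	})
	_, err := req.Send(ctx)
	return err
}
```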

" }, "shapes": { "Activities": { @@ -136,7 +136,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$AssociatePublicIpAddress": "

For Auto Scaling groups that are running in a virtual private cloud (VPC), specifies whether to assign a public IP address to the group's instances. If you specify true, each instance in the Auto Scaling group receives a unique public IP address. For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify this parameter, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

If the instance is launched into a default subnet, the default is to assign a public IP address, unless you disabled the option to assign a public IP address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address, unless you enabled the option to assign a public IP address on the subnet.

", - "LaunchConfiguration$AssociatePublicIpAddress": "

For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "LaunchConfiguration$AssociatePublicIpAddress": "

For Auto Scaling groups that are running in a VPC, specifies whether to assign a public IP address to the group's instances.

For more information, see Launching Auto Scaling Instances in a VPC in the Amazon EC2 Auto Scaling User Guide.

" } }, "AttachInstancesQuery": { @@ -207,7 +207,7 @@ "AutoScalingGroupNames": { "base": null, "refs": { - "AutoScalingGroupNamesType$AutoScalingGroupNames": "

The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.

If you omit this parameter, all Auto Scaling groups are described.

", + "AutoScalingGroupNamesType$AutoScalingGroupNames": "

The names of the Auto Scaling groups. Each name can be a maximum of 1600 characters. By default, you can only specify up to 50 names. You can optionally increase this limit using the MaxRecords parameter.

If you omit this parameter, all Auto Scaling groups are described.

", "DescribeNotificationConfigurationsType$AutoScalingGroupNames": "

The name of the Auto Scaling group.

" } }, @@ -289,7 +289,7 @@ "BlockDeviceEbsEncrypted": { "base": null, "refs": { - "Ebs$Encrypted": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.

" + "Ebs$Encrypted": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the AWS managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the AWS managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK Key Policy for Use with Encrypted Volumes in the Amazon EC2 Auto Scaling User Guide.

" } }, "BlockDeviceEbsIops": { @@ -301,13 +301,13 @@ "BlockDeviceEbsVolumeSize": { "base": null, "refs": { - "Ebs$VolumeSize": "

The volume size, in Gibibytes (GiB).

This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.

At least one of VolumeSize or SnapshotId is required.

" + "Ebs$VolumeSize": "

The volume size, in Gibibytes (GiB).

This can be a number from 1-1,024 for standard, 4-16,384 for io1, 1-16,384 for gp2, and 500-16,384 for st1 and sc1. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.

Default: If you create a volume from a snapshot and you don't specify a volume size, the default is the snapshot size.

At least one of VolumeSize or SnapshotId is required.

" } }, "BlockDeviceEbsVolumeType": { "base": null, "refs": { - "Ebs$VolumeType": "

The volume type, which can be standard for Magnetic, io1 for Provisioned IOPS SSD, gp2 for General Purpose SSD, st1 for Throughput Optimized HDD, or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Valid values: standard | io1 | gp2 | st1 | sc1

" + "Ebs$VolumeType": "

The volume type, which can be standard for Magnetic, io1 for Provisioned IOPS SSD, gp2 for General Purpose SSD, st1 for Throughput Optimized HDD, or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Valid Values: standard | io1 | gp2 | st1 | sc1

" } }, "BlockDeviceMapping": { @@ -326,8 +326,8 @@ "ClassicLinkVPCSecurityGroups": { "base": null, "refs": { - "CreateLaunchConfigurationType$ClassicLinkVPCSecurityGroups": "

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify the ClassicLinkVPCId parameter, you must specify this parameter.

", - "LaunchConfiguration$ClassicLinkVPCSecurityGroups": "

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" + "CreateLaunchConfigurationType$ClassicLinkVPCSecurityGroups": "

The IDs of one or more security groups for the specified ClassicLink-enabled VPC. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

If you specify the ClassicLinkVPCId parameter, you must specify this parameter.

", + "LaunchConfiguration$ClassicLinkVPCSecurityGroups": "

The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

" } }, "CompleteLifecycleActionAnswer": { @@ -366,7 +366,7 @@ } }, "CustomizedMetricSpecification": { - "base": "

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.

To create your customized metric specification:

For more information about CloudWatch, see Amazon CloudWatch Concepts.

", + "base": "

Represents a CloudWatch metric of your choosing for a target tracking scaling policy to use with Amazon EC2 Auto Scaling.

To create your customized metric specification:

For more information about CloudWatch, see Amazon CloudWatch Concepts.

", "refs": { "TargetTrackingConfiguration$CustomizedMetricSpecification": "

A customized metric. You must specify either a predefined metric or a customized metric.

" } @@ -551,8 +551,8 @@ "EbsOptimized": { "base": null, "refs": { - "CreateLaunchConfigurationType$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

", - "LaunchConfiguration$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false).

For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" + "CreateLaunchConfigurationType$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

", + "LaunchConfiguration$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false).

For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" } }, "EnableMetricsCollectionQuery": { @@ -613,7 +613,7 @@ "FailedScheduledUpdateGroupActionRequests": { "base": null, "refs": { - "BatchDeleteScheduledActionAnswer$FailedScheduledActions": "

The names of the scheduled actions that could not be deleted, including an error message.

", + "BatchDeleteScheduledActionAnswer$FailedScheduledActions": "

The names of the scheduled actions that could not be deleted, including an error message.

", "BatchPutScheduledUpdateGroupActionAnswer$FailedScheduledUpdateGroupActions": "

The names of the scheduled actions that could not be created or updated, including an error message.

" } }, @@ -684,8 +684,8 @@ "InstanceMonitoring": { "base": "

Describes whether detailed monitoring is enabled for the Auto Scaling instances.

", "refs": { - "CreateLaunchConfigurationType$InstanceMonitoring": "

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

", - "LaunchConfiguration$InstanceMonitoring": "

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" + "CreateLaunchConfigurationType$InstanceMonitoring": "

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

The default value is true (enabled).

When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

", + "LaunchConfiguration$InstanceMonitoring": "

Controls whether instances in this group are launched with detailed (true) or basic (false) monitoring.

For more information, see Configure Monitoring for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

" } }, "InstanceProtected": { @@ -705,9 +705,9 @@ } }, "InstancesDistribution": { - "base": "

Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

", + "base": "

Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy.

The instances distribution specifies the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity.

When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, this update action does not deploy any changes across the running Amazon EC2 instances in the group. Your existing Spot Instances continue to run as long as the maximum price for those instances is higher than the current Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

", "refs": { - "MixedInstancesPolicy$InstancesDistribution": "

The instances distribution to use.

If you leave this parameter unspecified when creating a mixed instances policy, the default values are used.

" + "MixedInstancesPolicy$InstancesDistribution": "

The instances distribution to use.

If you leave this parameter unspecified, the value for each parameter in InstancesDistribution uses a default value.

" } }, "InvalidNextToken": { @@ -749,7 +749,7 @@ } }, "LaunchTemplate": { - "base": "

Describes a launch template and overrides.

The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.

", + "base": "

Describes a launch template and overrides.

The overrides are used to override the instance type specified by the launch template with multiple instance types that can be used to launch On-Demand Instances and Spot Instances.

When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.

", "refs": { "MixedInstancesPolicy$LaunchTemplate": "

The launch template and instance types (overrides).

This parameter must be specified when creating a mixed instances policy.

" } @@ -761,7 +761,7 @@ } }, "LaunchTemplateOverrides": { - "base": "

Describes an override for a launch template.

", + "base": "

Describes an override for a launch template.

", "refs": { "Overrides$member": null } @@ -773,7 +773,7 @@ "AutoScalingInstanceDetails$LaunchTemplate": "

The launch template for the instance.

", "CreateAutoScalingGroupType$LaunchTemplate": "

The launch template to use to launch instances.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

If you do not specify LaunchTemplate, you must specify one of the following parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy.

", "Instance$LaunchTemplate": "

The launch template for the instance.

", - "LaunchTemplate$LaunchTemplateSpecification": "

The launch template to use. You must specify either the launch template ID or launch template name in the request.

", + "LaunchTemplate$LaunchTemplateSpecification": "

The launch template to use. You must specify either the launch template ID or launch template name in the request.

", "UpdateAutoScalingGroupType$LaunchTemplate": "

The launch template and version to use to specify the updates. If you specify LaunchTemplate in your update request, you can't specify LaunchConfigurationName or MixedInstancesPolicy.

For more information, see LaunchTemplateSpecification in the Amazon EC2 Auto Scaling API Reference.

" } }, @@ -875,6 +875,14 @@ "DescribeLoadBalancerTargetGroupsResponse$LoadBalancerTargetGroups": "

Information about the target groups.

" } }, + "MaxInstanceLifetime": { + "base": null, + "refs": { + "AutoScalingGroup$MaxInstanceLifetime": "

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 604800.

", + "CreateAutoScalingGroupType$MaxInstanceLifetime": "

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 604800.

", + "UpdateAutoScalingGroupType$MaxInstanceLifetime": "

The maximum amount of time, in seconds, that an instance can be in service.

Valid Range: Minimum value of 604800.

" + } + }, "MaxNumberOfAutoScalingGroups": { "base": null, "refs": { @@ -890,7 +898,7 @@ "MaxRecords": { "base": null, "refs": { - "AutoScalingGroupNamesType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

", + "AutoScalingGroupNamesType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.

", "DescribeAutoScalingInstancesType$MaxRecords": "

The maximum number of items to return with this call. The default value is 50 and the maximum value is 50.

", "DescribeLoadBalancerTargetGroupsRequest$MaxRecords": "

The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.

", "DescribeLoadBalancersRequest$MaxRecords": "

The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.

", @@ -981,7 +989,7 @@ "MetricType": { "base": null, "refs": { - "PredefinedMetricSpecification$PredefinedMetricType": "

The metric type.

" + "PredefinedMetricSpecification$PredefinedMetricType": "

The metric type. The following predefined metrics are available: ASGAverageCPUUtilization, ASGAverageNetworkIn, ASGAverageNetworkOut, and ALBRequestCountPerTarget.

" } }, "MetricUnit": { @@ -1020,7 +1028,7 @@ "MixedInstancesPolicy": { "base": "

Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy as the top-level parameter instead of a launch configuration or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.

", "refs": { - "AutoScalingGroup$MixedInstancesPolicy": "

The mixed instances policy for the group.

", + "AutoScalingGroup$MixedInstancesPolicy": "

The mixed instances policy for the group.

", "CreateAutoScalingGroupType$MixedInstancesPolicy": "

An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.

The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacity, but also the parameters that specify the instance configuration information—the launch template and instance types.

For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

You must specify one of the following parameters in your request: LaunchConfigurationName, LaunchTemplate, InstanceId, or MixedInstancesPolicy.

", "UpdateAutoScalingGroupType$MixedInstancesPolicy": "

An embedded object that specifies a mixed instances policy.

In your call to UpdateAutoScalingGroup, you can make changes to the policy that is specified. All optional parameters are left unchanged if not specified.

For more information, see MixedInstancesPolicy in the Amazon EC2 Auto Scaling API Reference and Auto Scaling Groups with Multiple Instance Types and Purchase Options in the Amazon EC2 Auto Scaling User Guide.

" } @@ -1071,19 +1079,19 @@ "OnDemandBaseCapacity": { "base": null, "refs": { - "InstancesDistribution$OnDemandBaseCapacity": "

The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.

The default value is 0. If you leave this parameter set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.

" + "InstancesDistribution$OnDemandBaseCapacity": "

The minimum amount of the Auto Scaling group's capacity that must be fulfilled by On-Demand Instances. This base portion is provisioned first as your group scales.

Default if not set is 0. If you leave it set to 0, On-Demand Instances are launched as a percentage of the Auto Scaling group's desired capacity, per the OnDemandPercentageAboveBaseCapacity setting.

An update to this setting means a gradual replacement of instances to maintain the specified number of On-Demand Instances for your base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.

" } }, "OnDemandPercentageAboveBaseCapacity": { "base": null, "refs": { - "InstancesDistribution$OnDemandPercentageAboveBaseCapacity": "

Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity. The range is 0–100.

The default value is 100. If you leave this parameter set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.

" + "InstancesDistribution$OnDemandPercentageAboveBaseCapacity": "

Controls the percentages of On-Demand Instances and Spot Instances for your additional capacity beyond OnDemandBaseCapacity.

Default if not set is 100. If you leave it set to 100, the percentages are 100% for On-Demand Instances and 0% for Spot Instances.

An update to this setting means a gradual replacement of instances to maintain the percentage of On-Demand Instances for your additional capacity above the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches new instances before terminating the old ones.

Valid Range: Minimum value of 0. Maximum value of 100.

" } }, "Overrides": { "base": null, "refs": { - "LaunchTemplate$Overrides": "

Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You must specify between 2 and 20 overrides.

" + "LaunchTemplate$Overrides": "

An optional setting. Any parameters that you specify override the same parameters in the launch template. Currently, the only supported override is instance type. You can specify between 1 and 20 instance types.

" } }, "PoliciesType": { @@ -1099,7 +1107,7 @@ "PolicyIncrement": { "base": null, "refs": { - "PutScalingPolicyType$ScalingAdjustment": "

The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.

Conditional: If you specify SimpleScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

", + "PutScalingPolicyType$ScalingAdjustment": "

The amount by which a simple scaling policy scales the Auto Scaling group in response to an alarm breach. The adjustment is based on the value that you specified in the AdjustmentType parameter (either an absolute number or a percentage). A positive value adds to the current capacity and a negative value subtracts from the current capacity. For exact capacity, you must specify a positive value.

Conditional: If you specify SimpleScaling for the policy type, you must specify this parameter. (Not used with any other policy type.)

", "ScalingPolicy$ScalingAdjustment": "

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

", "StepAdjustment$ScalingAdjustment": "

The amount by which to scale, based on the specified adjustment type. A positive value adds to the current capacity while a negative number removes from the current capacity.

" } @@ -1272,7 +1280,7 @@ "SetDesiredCapacityType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "SetInstanceProtectionQuery$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "UpdateAutoScalingGroupType$AutoScalingGroupName": "

The name of the Auto Scaling group.

", - "UpdateAutoScalingGroupType$LaunchConfigurationName": "

The name of the launch configuration. If you specify LaunchConfigurationName in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy.

To update an Auto Scaling group with a launch configuration with InstanceMonitoring set to false, you must first disable the collection of group metrics. Otherwise, you get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.

", + "UpdateAutoScalingGroupType$LaunchConfigurationName": "

The name of the launch configuration. If you specify LaunchConfigurationName in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy.

", "UpdateAutoScalingGroupType$ServiceLinkedRoleARN": "

The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. For more information, see Service-Linked Roles in the Amazon EC2 Auto Scaling User Guide.

" } }, @@ -1317,13 +1325,13 @@ } }, "ScheduledUpdateGroupAction": { - "base": "

Describes a scheduled scaling action. Used in response to DescribeScheduledActions.

", + "base": "

Describes a scheduled scaling action. Used in response to DescribeScheduledActions.

", "refs": { "ScheduledUpdateGroupActions$member": null } }, "ScheduledUpdateGroupActionRequest": { - "base": "

Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

", + "base": "

Describes one or more scheduled scaling action updates for a specified Auto Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction.

When updating a scheduled scaling action, all optional parameters are left unchanged if not specified.

", "refs": { "ScheduledUpdateGroupActionRequests$member": null } @@ -1331,7 +1339,7 @@ "ScheduledUpdateGroupActionRequests": { "base": null, "refs": { - "BatchPutScheduledUpdateGroupActionType$ScheduledUpdateGroupActions": "

One or more scheduled actions. The maximum number allowed is 50.

" + "BatchPutScheduledUpdateGroupActionType$ScheduledUpdateGroupActions": "

One or more scheduled actions. The maximum number allowed is 50.

" } }, "ScheduledUpdateGroupActions": { @@ -1344,7 +1352,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$SecurityGroups": "

A list that contains the security groups to assign to the instances in the Auto Scaling group.

[EC2-VPC] Specify the security group IDs. For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

[EC2-Classic] Specify either the security group names or the security group IDs. For more information, see Amazon EC2 Security Groups in the Amazon EC2 User Guide for Linux Instances.

", - "LaunchConfiguration$SecurityGroups": "

A list that contains the security groups to assign to the instances in the Auto Scaling group.

For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" + "LaunchConfiguration$SecurityGroups": "

A list that contains the security groups to assign to the instances in the Auto Scaling group.

For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

" } }, "ServiceLinkedRoleFailure": { @@ -1389,14 +1397,14 @@ "SpotInstancePools": { "base": null, "refs": { - "InstancesDistribution$SpotInstancePools": "

The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. The range is 1–20. The default value is 2.

Valid only when the Spot allocation strategy is lowest-price.

" + "InstancesDistribution$SpotInstancePools": "

The number of Spot Instance pools across which to allocate your Spot Instances. The Spot pools are determined from the different instance types in the Overrides array of LaunchTemplate. Default if not set is 2.

Used only when the Spot allocation strategy is lowest-price.

Valid Range: Minimum value of 1. Maximum value of 20.

" } }, "SpotPrice": { "base": null, "refs": { - "CreateLaunchConfigurationType$SpotPrice": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

If a Spot price is set, then the Auto Scaling group will only launch instances when the Spot price has been met, regardless of the setting in the Auto Scaling group's DesiredCapacity.

When you change your Spot price by creating a new launch configuration, running instances will continue to run as long as the Spot price for those running instances is higher than the current Spot market price.

", - "LaunchConfiguration$SpotPrice": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot market price.

For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" + "CreateLaunchConfigurationType$SpotPrice": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.

", + "LaunchConfiguration$SpotPrice": "

The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price.

For more information, see Launching Spot Instances in Your Auto Scaling Group in the Amazon EC2 Auto Scaling User Guide.

" } }, "StepAdjustment": { @@ -1511,7 +1519,7 @@ "LaunchConfiguration$CreatedTime": "

The creation date and time for the launch configuration.

", "PutScheduledUpdateGroupActionType$Time": "

This parameter is no longer used.

", "PutScheduledUpdateGroupActionType$StartTime": "

The date and time for this action to start, in YYYY-MM-DDThh:mm:ssZ format in UTC/GMT only and in quotes (for example, \"2019-06-01T00:00:00Z\").

If you specify Recurrence and StartTime, Amazon EC2 Auto Scaling performs the action at this time, and then performs the action based on the specified recurrence.

If you try to schedule your action in the past, Amazon EC2 Auto Scaling returns an error message.

", - "PutScheduledUpdateGroupActionType$EndTime": "

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

", + "PutScheduledUpdateGroupActionType$EndTime": "

The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling does not perform the action after this time.

", "ScheduledUpdateGroupAction$Time": "

This parameter is no longer used.

", "ScheduledUpdateGroupAction$StartTime": "

The date and time in UTC for this action to start. For example, \"2019-06-01T00:00:00Z\".

", "ScheduledUpdateGroupAction$EndTime": "

The date and time in UTC for the recurring schedule to end. For example, \"2019-06-01T00:00:00Z\".

", @@ -1554,8 +1562,8 @@ "DescribeTagsType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "FailedScheduledUpdateGroupActionRequest$ErrorMessage": "

The error message accompanying the error code.

", "Filter$Name": "

The name of the filter. The valid values are: \"auto-scaling-group\", \"key\", \"value\", and \"propagate-at-launch\".

", - "InstancesDistribution$OnDemandAllocationStrategy": "

Indicates how to allocate instance types to fulfill On-Demand capacity.

The only valid value is prioritized, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on.

", - "InstancesDistribution$SpotAllocationStrategy": "

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.

The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price. The default Spot allocation strategy for the AWS Management Console is capacity-optimized.

Valid values: lowest-price | capacity-optimized

", + "InstancesDistribution$OnDemandAllocationStrategy": "

Indicates how to allocate instance types to fulfill On-Demand capacity.

The only valid value is prioritized, which is also the default value. This strategy uses the order of instance type overrides for the LaunchTemplate to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.

", + "InstancesDistribution$SpotAllocationStrategy": "

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. If the allocation strategy is capacity-optimized, the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.

The default Spot allocation strategy for calls that you make through the API, the AWS CLI, or the AWS SDKs is lowest-price. The default Spot allocation strategy for the AWS Management Console is capacity-optimized.

Valid values: lowest-price | capacity-optimized

", "LaunchConfigurationNamesType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "LaunchConfigurationsType$NextToken": "

A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken value when requesting the next set of items. This value is null when there are no more items to return.

", "PoliciesType$NextToken": "

A string that indicates that the response contains more items than can be returned in a single response. To receive additional items, specify this string for the NextToken value when requesting the next set of items. This value is null when there are no more items to return.

", @@ -1575,15 +1583,15 @@ "Activity$Cause": "

The reason the activity began.

", "LifecycleHook$NotificationMetadata": "

Additional information that is included any time Amazon EC2 Auto Scaling sends a message to the notification target.

", "LifecycleHookSpecification$NotificationMetadata": "

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.

", - "PredefinedMetricSpecification$ResourceLabel": "

Identifies the resource associated with the metric type. The following predefined metrics are available:

For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, and ASGAverageNetworkOut, the parameter must not be specified as the resource associated with the metric type is the Auto Scaling group. For predefined metric type ALBRequestCountPerTarget, the parameter must be specified in the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id , where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN. The target group must be attached to the Auto Scaling group.

", + "PredefinedMetricSpecification$ResourceLabel": "

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group.

The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id, where app/load-balancer-name/load-balancer-id is the final portion of the load balancer ARN, and targetgroup/target-group-name/target-group-id is the final portion of the target group ARN.

", "PutLifecycleHookType$NotificationMetadata": "

Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.

" } }, "XmlStringMaxLen1600": { "base": null, "refs": { - "CreateLaunchConfigurationType$IamInstanceProfile": "

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

", - "LaunchConfiguration$IamInstanceProfile": "

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

", + "CreateLaunchConfigurationType$IamInstanceProfile": "

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

", + "LaunchConfiguration$IamInstanceProfile": "

The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role.

For more information, see IAM Role for Applications That Run on Amazon EC2 Instances in the Amazon EC2 Auto Scaling User Guide.

", "TerminationPolicies$member": null } }, @@ -1621,6 +1629,7 @@ "AutoScalingGroup$LaunchConfigurationName": "

The name of the associated launch configuration.

", "AutoScalingGroup$PlacementGroup": "

The name of the placement group into which to launch your instances, if any.

", "AutoScalingGroup$Status": "

The current state of the group when DeleteAutoScalingGroup is in progress.

", + "AutoScalingInstanceDetails$InstanceType": "

The instance type of the EC2 instance.

", "AutoScalingInstanceDetails$AutoScalingGroupName": "

The name of the Auto Scaling group for the instance.

", "AutoScalingInstanceDetails$AvailabilityZone": "

The Availability Zone for the instance.

", "AutoScalingInstanceDetails$LaunchConfigurationName": "

The launch configuration used to launch the instance. This value is not available if you attached the instance to the Auto Scaling group.

", @@ -1634,26 +1643,27 @@ "CreateLaunchConfigurationType$LaunchConfigurationName": "

The name of the launch configuration. This name must be unique per Region per account.

", "CreateLaunchConfigurationType$ImageId": "

The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

If you do not specify InstanceId, you must specify ImageId.

", "CreateLaunchConfigurationType$KeyName": "

The name of the key pair. For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.

", - "CreateLaunchConfigurationType$ClassicLinkVPCId": "

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

This parameter can only be used if you are launching EC2-Classic instances.

", + "CreateLaunchConfigurationType$ClassicLinkVPCId": "

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to. For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

This parameter can only be used if you are launching EC2-Classic instances.

", "CreateLaunchConfigurationType$InstanceType": "

Specifies the instance type of the EC2 instance.

For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.

If you do not specify InstanceId, you must specify InstanceType.

", "CreateLaunchConfigurationType$KernelId": "

The ID of the kernel associated with the AMI.

", "CreateLaunchConfigurationType$RamdiskId": "

The ID of the RAM disk to select.

", - "Ebs$SnapshotId": "

The snapshot ID of the volume to use.

Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater than the size of the snapshot.

", + "Ebs$SnapshotId": "

The snapshot ID of the volume to use.

Conditional: This parameter is optional if you specify a volume size. If you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater than the size of the snapshot.

", "EnableMetricsCollectionQuery$Granularity": "

The granularity to associate with the metrics to collect. The only valid value is 1Minute.

", "EnabledMetric$Metric": "

One of the following metrics:

", "EnabledMetric$Granularity": "

The granularity of the metric. The only valid value is 1Minute.

", "FailedScheduledUpdateGroupActionRequest$ScheduledActionName": "

The name of the scheduled action.

", + "Instance$InstanceType": "

The instance type of the EC2 instance.

", "Instance$AvailabilityZone": "

The Availability Zone in which the instance is running.

", "Instance$LaunchConfigurationName": "

The launch configuration associated with the instance.

", "InvalidNextToken$message": "

", "LaunchConfiguration$LaunchConfigurationName": "

The name of the launch configuration.

", - "LaunchConfiguration$ImageId": "

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

", + "LaunchConfiguration$ImageId": "

The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances.

For more information, see Finding an AMI in the Amazon EC2 User Guide for Linux Instances.

", "LaunchConfiguration$KeyName": "

The name of the key pair.

For more information, see Amazon EC2 Key Pairs in the Amazon EC2 User Guide for Linux Instances.

", - "LaunchConfiguration$ClassicLinkVPCId": "

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

", + "LaunchConfiguration$ClassicLinkVPCId": "

The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.

For more information, see ClassicLink in the Amazon EC2 User Guide for Linux Instances and Linking EC2-Classic Instances to a VPC in the Amazon EC2 Auto Scaling User Guide.

", "LaunchConfiguration$InstanceType": "

The instance type for the instances.

For information about available instance types, see Available Instance Types in the Amazon EC2 User Guide for Linux Instances.

", "LaunchConfiguration$KernelId": "

The ID of the kernel associated with the AMI.

", "LaunchConfiguration$RamdiskId": "

The ID of the RAM disk associated with the AMI.

", - "LaunchTemplateOverrides$InstanceType": "

The instance type.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

", + "LaunchTemplateOverrides$InstanceType": "

The instance type.

For information about available instance types, see Available Instance Types in the Amazon Elastic Compute Cloud User Guide.

", "LaunchTemplateSpecification$LaunchTemplateId": "

The ID of the launch template. You must specify either a template ID or a template name.

", "LaunchTemplateSpecification$Version": "

The version number, $Latest, or $Default. If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default.

", "LimitExceededFault$message": "

", @@ -1679,7 +1689,7 @@ "ScalingPolicy$AdjustmentType": "

The adjustment type, which specifies how ScalingAdjustment is interpreted. The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity.

", "ScheduledUpdateGroupAction$AutoScalingGroupName": "

The name of the Auto Scaling group.

", "ScheduledUpdateGroupAction$ScheduledActionName": "

The name of the scheduled action.

", - "ScheduledUpdateGroupAction$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", + "ScheduledUpdateGroupAction$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", "ScheduledUpdateGroupActionRequest$ScheduledActionName": "

The name of the scaling action.

", "ScheduledUpdateGroupActionRequest$Recurrence": "

The recurring schedule for the action, in Unix cron syntax format. This format consists of five fields separated by white spaces: [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week]. The value must be in quotes (for example, \"30 0 1 1,6,12 *\"). For more information about this format, see Crontab.

When StartTime and EndTime are specified with Recurrence, they form the boundaries of when the recurring action starts and stops.

", "ServiceLinkedRoleFailure$message": null, @@ -1694,8 +1704,11 @@ "AutoScalingGroup$HealthCheckType": "

The service to use for the health checks. The valid values are EC2 and ELB. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

", "AutoScalingInstanceDetails$LifecycleState": "

The lifecycle state for the instance.

", "AutoScalingInstanceDetails$HealthStatus": "

The last reported health status of this instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and Amazon EC2 Auto Scaling should terminate and replace it.

", + "AutoScalingInstanceDetails$WeightedCapacity": "

The number of capacity units contributed by the instance based on its instance type.

Valid Range: Minimum value of 1. Maximum value of 999.

", "CreateAutoScalingGroupType$HealthCheckType": "

The service to use for the health checks. The valid values are EC2 and ELB. The default value is EC2. If you configure an Auto Scaling group to use ELB health checks, it considers the instance unhealthy if it fails either the EC2 status checks or the load balancer health checks.

For more information, see Health Checks for Auto Scaling Instances in the Amazon EC2 Auto Scaling User Guide.

", "Instance$HealthStatus": "

The last reported health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and that Amazon EC2 Auto Scaling should terminate and replace it.

", + "Instance$WeightedCapacity": "

The number of capacity units contributed by the instance based on its instance type.

Valid Range: Minimum value of 1. Maximum value of 999.

", + "LaunchTemplateOverrides$WeightedCapacity": "

The number of capacity units, which gives the instance type a proportional weight to other instance types. For example, larger instance types are generally weighted more than smaller instance types. These are the same units that you chose to set the desired capacity in terms of instances, or a performance attribute such as vCPUs, memory, or I/O.

Valid Range: Minimum value of 1. Maximum value of 999.

", "PutScalingPolicyType$MetricAggregationType": "

The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average.

Valid only if the policy type is StepScaling.

", "ScalingPolicy$MetricAggregationType": "

The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average.

", "SetInstanceHealthQuery$HealthStatus": "

The health status of the instance. Set to Healthy to have the instance remain in service. Set to Unhealthy to have the instance be out of service. Amazon EC2 Auto Scaling terminates and replaces the unhealthy instance.

", @@ -1712,7 +1725,7 @@ "XmlStringMaxLen64": { "base": null, "refs": { - "CreateLaunchConfigurationType$PlacementTenancy": "

The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

Valid values: default | dedicated

", + "CreateLaunchConfigurationType$PlacementTenancy": "

The tenancy of the instance. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to default), you must set the value of this parameter to dedicated.

If you specify PlacementTenancy, you must specify at least one subnet for VPCZoneIdentifier when you create your group.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

Valid Values: default | dedicated

", "FailedScheduledUpdateGroupActionRequest$ErrorCode": "

The error code.

", "LaunchConfiguration$PlacementTenancy": "

The tenancy of the instance, either default or dedicated. An instance with dedicated tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC.

For more information, see Instance Placement Tenancy in the Amazon EC2 Auto Scaling User Guide.

", "PolicyTypes$member": null, @@ -1724,7 +1737,7 @@ "base": null, "refs": { "CreateLaunchConfigurationType$UserData": "

The Base64-encoded user data to make available to the launched EC2 instances. For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

", - "LaunchConfiguration$UserData": "

The Base64-encoded user data to make available to the launched EC2 instances.

For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" + "LaunchConfiguration$UserData": "

The Base64-encoded user data to make available to the launched EC2 instances.

For more information, see Instance Metadata and User Data in the Amazon EC2 User Guide for Linux Instances.

" } } } diff --git a/models/apis/ce/2017-10-25/api-2.json b/models/apis/ce/2017-10-25/api-2.json index 67364aedad8..0f9c312ab53 100644 --- a/models/apis/ce/2017-10-25/api-2.json +++ b/models/apis/ce/2017-10-25/api-2.json @@ -1192,6 +1192,7 @@ "CurrencyCode":{"shape":"GenericString"}, "EstimatedSPCost":{"shape":"GenericString"}, "EstimatedOnDemandCost":{"shape":"GenericString"}, + "EstimatedOnDemandCostWithCurrentCommitment":{"shape":"GenericString"}, "EstimatedSavingsAmount":{"shape":"GenericString"}, "EstimatedSavingsPercentage":{"shape":"GenericString"}, "HourlyCommitmentToPurchase":{"shape":"GenericString"}, @@ -1225,7 +1226,8 @@ "DailyCommitmentToPurchase":{"shape":"GenericString"}, "HourlyCommitmentToPurchase":{"shape":"GenericString"}, "EstimatedSavingsPercentage":{"shape":"GenericString"}, - "EstimatedMonthlySavingsAmount":{"shape":"GenericString"} + "EstimatedMonthlySavingsAmount":{"shape":"GenericString"}, + "EstimatedOnDemandCostWithCurrentCommitment":{"shape":"GenericString"} } }, "SavingsPlansSavings":{ diff --git a/models/apis/ce/2017-10-25/docs-2.json b/models/apis/ce/2017-10-25/docs-2.json index 8fb24b13f36..c2f399ba0fe 100644 --- a/models/apis/ce/2017-10-25/docs-2.json +++ b/models/apis/ce/2017-10-25/docs-2.json @@ -388,6 +388,7 @@ "SavingsPlansPurchaseRecommendationDetail$CurrencyCode": "

The currency code Amazon Web Services used to generate the recommendations and present potential savings.

", "SavingsPlansPurchaseRecommendationDetail$EstimatedSPCost": "

The cost of the recommended Savings Plans over the length of the lookback period.

", "SavingsPlansPurchaseRecommendationDetail$EstimatedOnDemandCost": "

The remaining On-Demand cost estimated to not be covered by the recommended Savings Plans, over the length of the lookback period.

", + "SavingsPlansPurchaseRecommendationDetail$EstimatedOnDemandCostWithCurrentCommitment": "

The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.

", "SavingsPlansPurchaseRecommendationDetail$EstimatedSavingsAmount": "

The estimated savings amount based on the recommended Savings Plans over the length of the lookback period.

", "SavingsPlansPurchaseRecommendationDetail$EstimatedSavingsPercentage": "

The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.

", "SavingsPlansPurchaseRecommendationDetail$HourlyCommitmentToPurchase": "

The recommended hourly commitment level for the Savings Plans type, and configuration based on the usage during the lookback period.

", @@ -408,6 +409,7 @@ "SavingsPlansPurchaseRecommendationSummary$HourlyCommitmentToPurchase": "

The recommended hourly commitment based on the recommendation parameters.

", "SavingsPlansPurchaseRecommendationSummary$EstimatedSavingsPercentage": "

The estimated savings relative to the total cost of On-Demand usage, over the lookback period. This is calculated as estimatedSavingsAmount/ CurrentOnDemandSpend*100.

", "SavingsPlansPurchaseRecommendationSummary$EstimatedMonthlySavingsAmount": "

The estimated monthly savings amount, based on the recommended Savings Plans purchase.

", + "SavingsPlansPurchaseRecommendationSummary$EstimatedOnDemandCostWithCurrentCommitment": "

The estimated On-Demand costs you would expect with no additional commitment, based on your usage of the selected time period and the Savings Plans you own.

", "SavingsPlansSavings$NetSavings": "

The savings amount that you are accumulating for the usage that is covered by a Savings Plans, when compared to the On-Demand equivalent of the same usage.

", "SavingsPlansSavings$OnDemandCostEquivalent": "

How much the usage would have cost if it had been accrued at the On-Demand rate.

", "SavingsPlansUtilization$TotalCommitment": "

The total amount of Savings Plans commitment that's been purchased in an account (or set of accounts).

", @@ -564,7 +566,7 @@ "Granularity": { "base": null, "refs": { - "GetCostAndUsageRequest$Granularity": "

Sets the AWS cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, either MONTHLY or DAILY, or HOURLY.

The GetCostAndUsageRequest operation supports only DAILY and MONTHLY granularities.

", + "GetCostAndUsageRequest$Granularity": "

Sets the AWS cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include Granularity, either MONTHLY, DAILY, or HOURLY.

", "GetCostAndUsageWithResourcesRequest$Granularity": "

Sets the AWS cost granularity to MONTHLY, DAILY, or HOURLY. If Granularity isn't set, the response object doesn't include the Granularity, MONTHLY, DAILY, or HOURLY.

", "GetCostForecastRequest$Granularity": "

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.

The GetCostForecast operation supports only DAILY and MONTHLY granularities.

", "GetReservationCoverageRequest$Granularity": "

The granularity of the AWS cost data for the reservation. Valid values are MONTHLY and DAILY.

If GroupBy is set, Granularity can't be set. If Granularity isn't set, the response object doesn't include Granularity, either MONTHLY or DAILY.

The GetReservationCoverage operation supports only DAILY and MONTHLY granularities.

", @@ -601,7 +603,7 @@ "GroupDefinitions": { "base": null, "refs": { - "GetCostAndUsageRequest$GroupBy": "

You can group AWS costs using up to two different groups, either dimensions, tag keys, or both.

When you group by tag key, you get all tag values, including empty strings.

Valid values are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TAGS, TENANCY, and USAGE_TYPE.

", + "GetCostAndUsageRequest$GroupBy": "

You can group AWS costs using up to two different groups, either dimensions, tag keys, or both.

When you group by tag key, you get all tag values, including empty strings.

Valid values are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, LINKED_ACCOUNT, OPERATION, PLATFORM, PURCHASE_TYPE, SERVICE, TAGS, TENANCY, RECORD_TYPE, and USAGE_TYPE.

", "GetCostAndUsageResponse$GroupDefinitions": "

The groups that are specified by the Filter or GroupBy parameters in the request.

", "GetCostAndUsageWithResourcesRequest$GroupBy": "

You can group Amazon Web Services costs using up to two different groups: either dimensions, tag keys, or both.

", "GetCostAndUsageWithResourcesResponse$GroupDefinitions": "

The groups that are specified by the Filter or GroupBy parameters in the request.

", diff --git a/models/apis/chime/2018-05-01/api-2.json b/models/apis/chime/2018-05-01/api-2.json index 6135127011e..024e5c9160f 100644 --- a/models/apis/chime/2018-05-01/api-2.json +++ b/models/apis/chime/2018-05-01/api-2.json @@ -70,6 +70,44 @@ {"shape":"ServiceFailureException"} ] }, + "BatchCreateAttendee":{ + "name":"BatchCreateAttendee", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/attendees?operation=batch-create", + "responseCode":201 + }, + "input":{"shape":"BatchCreateAttendeeRequest"}, + "output":{"shape":"BatchCreateAttendeeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "BatchCreateRoomMembership":{ + "name":"BatchCreateRoomMembership", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships?operation=batch-create", + "responseCode":201 + }, + "input":{"shape":"BatchCreateRoomMembershipRequest"}, + "output":{"shape":"BatchCreateRoomMembershipResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "BatchDeletePhoneNumber":{ "name":"BatchDeletePhoneNumber", "http":{ @@ -184,6 +222,26 @@ {"shape":"ServiceFailureException"} ] }, + "CreateAttendee":{ + "name":"CreateAttendee", + "http":{ + "method":"POST", + "requestUri":"/meetings/{meetingId}/attendees", + "responseCode":201 + }, + "input":{"shape":"CreateAttendeeRequest"}, + "output":{"shape":"CreateAttendeeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "CreateBot":{ "name":"CreateBot", "http":{ @@ -203,6 +261,25 @@ {"shape":"NotFoundException"} ] }, + "CreateMeeting":{ + "name":"CreateMeeting", + "http":{ + "method":"POST", + "requestUri":"/meetings", + "responseCode":201 + }, + "input":{"shape":"CreateMeetingRequest"}, + "output":{"shape":"CreateMeetingResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "CreatePhoneNumberOrder":{ "name":"CreatePhoneNumberOrder", "http":{ @@ -223,6 +300,45 @@ {"shape":"ServiceFailureException"} ] }, + "CreateRoom":{ + "name":"CreateRoom", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms", + "responseCode":201 + }, + "input":{"shape":"CreateRoomRequest"}, + "output":{"shape":"CreateRoomResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "CreateRoomMembership":{ + "name":"CreateRoomMembership", + "http":{ + 
"method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships", + "responseCode":201 + }, + "input":{"shape":"CreateRoomMembershipRequest"}, + "output":{"shape":"CreateRoomMembershipResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "CreateVoiceConnector":{ "name":"CreateVoiceConnector", "http":{ @@ -283,6 +399,24 @@ {"shape":"ServiceFailureException"} ] }, + "DeleteAttendee":{ + "name":"DeleteAttendee", + "http":{ + "method":"DELETE", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAttendeeRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "DeleteEventsConfiguration":{ "name":"DeleteEventsConfiguration", "http":{ @@ -300,6 +434,24 @@ {"shape":"ResourceLimitExceededException"} ] }, + "DeleteMeeting":{ + "name":"DeleteMeeting", + "http":{ + "method":"DELETE", + "requestUri":"/meetings/{meetingId}", + "responseCode":204 + }, + "input":{"shape":"DeleteMeetingRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "DeletePhoneNumber":{ "name":"DeletePhoneNumber", "http":{ @@ -318,6 +470,40 @@ {"shape":"ServiceFailureException"} ] }, + "DeleteRoom":{ + "name":"DeleteRoom", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "responseCode":204 + }, + "input":{"shape":"DeleteRoomRequest"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "DeleteRoomMembership":{ + "name":"DeleteRoomMembership", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "responseCode":204 + }, + "input":{"shape":"DeleteRoomMembershipRequest"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "DeleteVoiceConnector":{ "name":"DeleteVoiceConnector", "http":{ @@ -521,6 +707,25 @@ {"shape":"ServiceFailureException"} ] }, + "GetAttendee":{ + "name":"GetAttendee", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}/attendees/{attendeeId}", + "responseCode":200 + }, + "input":{"shape":"GetAttendeeRequest"}, + "output":{"shape":"GetAttendeeResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "GetBot":{ "name":"GetBot", "http":{ @@ 
-575,6 +780,25 @@ {"shape":"ServiceFailureException"} ] }, + "GetMeeting":{ + "name":"GetMeeting", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}", + "responseCode":200 + }, + "input":{"shape":"GetMeetingRequest"}, + "output":{"shape":"GetMeetingResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "GetPhoneNumber":{ "name":"GetPhoneNumber", "http":{ @@ -629,6 +853,24 @@ {"shape":"ServiceFailureException"} ] }, + "GetRoom":{ + "name":"GetRoom", + "http":{ + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "responseCode":200 + }, + "input":{"shape":"GetRoomRequest"}, + "output":{"shape":"GetRoomResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "GetUser":{ "name":"GetUser", "http":{ @@ -837,6 +1079,25 @@ {"shape":"ServiceFailureException"} ] }, + "ListAttendees":{ + "name":"ListAttendees", + "http":{ + "method":"GET", + "requestUri":"/meetings/{meetingId}/attendees", + "responseCode":200 + }, + "input":{"shape":"ListAttendeesRequest"}, + "output":{"shape":"ListAttendeesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "ListBots":{ "name":"ListBots", "http":{ @@ -855,6 +1116,24 @@ {"shape":"NotFoundException"} ] }, + "ListMeetings":{ + "name":"ListMeetings", + "http":{ + "method":"GET", + "requestUri":"/meetings", + "responseCode":200 + }, + "input":{"shape":"ListMeetingsRequest"}, + "output":{"shape":"ListMeetingsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ThrottledClientException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "ListPhoneNumberOrders":{ "name":"ListPhoneNumberOrders", "http":{ @@ -890,6 +1169,42 @@ {"shape":"ServiceFailureException"} ] }, + "ListRoomMemberships":{ + "name":"ListRoomMemberships", + "http":{ + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships", + "responseCode":200 + }, + "input":{"shape":"ListRoomMembershipsRequest"}, + "output":{"shape":"ListRoomMembershipsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "ListRooms":{ + "name":"ListRooms", + "http":{ + "method":"GET", + "requestUri":"/accounts/{accountId}/rooms", + "responseCode":200 + }, + "input":{"shape":"ListRoomsRequest"}, + "output":{"shape":"ListRoomsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "ListUsers":{ "name":"ListUsers", "http":{ @@ -1282,6 +1597,42 @@ 
{"shape":"ServiceFailureException"} ] }, + "UpdateRoom":{ + "name":"UpdateRoom", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}", + "responseCode":200 + }, + "input":{"shape":"UpdateRoomRequest"}, + "output":{"shape":"UpdateRoomResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedClientException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, + "UpdateRoomMembership":{ + "name":"UpdateRoomMembership", + "http":{ + "method":"POST", + "requestUri":"/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + "responseCode":200 + }, + "input":{"shape":"UpdateRoomMembershipRequest"}, + "output":{"shape":"UpdateRoomMembershipResponse"}, + "errors":[ + {"shape":"UnauthorizedClientException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"}, + {"shape":"ForbiddenException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ServiceFailureException"} + ] + }, "UpdateUser":{ "name":"UpdateUser", "http":{ @@ -1412,6 +1763,13 @@ "EnterpriseOIDC" ] }, + "Arn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^arn[\\/\\:\\-\\_\\.a-zA-Z0-9]+$", + "sensitive":true + }, "AssociatePhoneNumberWithUserRequest":{ "type":"structure", "required":[ @@ -1476,6 +1834,18 @@ "PhoneNumberErrors":{"shape":"PhoneNumberErrorList"} } }, + "Attendee":{ + "type":"structure", + "members":{ + "ExternalUserId":{"shape":"ExternalUserIdType"}, + "AttendeeId":{"shape":"GuidString"}, + "JoinToken":{"shape":"JoinTokenString"} + } + }, + "AttendeeList":{ + "type":"list", + "member":{"shape":"Attendee"} + }, "BadRequestException":{ "type":"structure", "members":{ @@ -1485,6 +1855,59 @@ "error":{"httpStatusCode":400}, "exception":true }, + "BatchCreateAttendeeErrorList":{ + "type":"list", + "member":{"shape":"CreateAttendeeError"} + }, + "BatchCreateAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "Attendees" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + }, + "Attendees":{"shape":"CreateAttendeeRequestItemList"} + } + }, + "BatchCreateAttendeeResponse":{ + "type":"structure", + "members":{ + "Attendees":{"shape":"AttendeeList"}, + "Errors":{"shape":"BatchCreateAttendeeErrorList"} + } + }, + "BatchCreateRoomMembershipRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MembershipItemList" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + "MembershipItemList":{"shape":"MembershipItemList"} + } + }, + "BatchCreateRoomMembershipResponse":{ + "type":"structure", + "members":{ + "Errors":{"shape":"MemberErrorList"} + } + }, "BatchDeletePhoneNumberRequest":{ "type":"structure", "required":["PhoneNumberIds"], @@ -1625,6 +2048,13 @@ "type":"list", "member":{"shape":"CallingRegion"} }, + "ClientRequestToken":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[-_a-zA-Z0-9]*", + "sensitive":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -1651,6 +2081,46 @@ "Account":{"shape":"Account"} } }, + "CreateAttendeeError":{ + "type":"structure", + "members":{ + "ExternalUserId":{"shape":"ExternalUserIdType"}, + "ErrorCode":{"shape":"String"}, + "ErrorMessage":{"shape":"String"} + } + }, + "CreateAttendeeRequest":{ + "type":"structure", + 
"required":[ + "MeetingId", + "ExternalUserId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + }, + "ExternalUserId":{"shape":"ExternalUserIdType"} + } + }, + "CreateAttendeeRequestItem":{ + "type":"structure", + "required":["ExternalUserId"], + "members":{ + "ExternalUserId":{"shape":"ExternalUserIdType"} + } + }, + "CreateAttendeeRequestItemList":{ + "type":"list", + "member":{"shape":"CreateAttendeeRequestItem"} + }, + "CreateAttendeeResponse":{ + "type":"structure", + "members":{ + "Attendee":{"shape":"Attendee"} + } + }, "CreateBotRequest":{ "type":"structure", "required":[ @@ -1673,6 +2143,25 @@ "Bot":{"shape":"Bot"} } }, + "CreateMeetingRequest":{ + "type":"structure", + "required":["ClientRequestToken"], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "MeetingHostId":{"shape":"ExternalUserIdType"}, + "MediaRegion":{"shape":"String"}, + "NotificationsConfiguration":{"shape":"MeetingNotificationConfiguration"} + } + }, + "CreateMeetingResponse":{ + "type":"structure", + "members":{ + "Meeting":{"shape":"Meeting"} + } + }, "CreatePhoneNumberOrderRequest":{ "type":"structure", "required":[ @@ -1690,6 +2179,59 @@ "PhoneNumberOrder":{"shape":"PhoneNumberOrder"} } }, + "CreateRoomMembershipRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MemberId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + "MemberId":{"shape":"NonEmptyString"}, + "Role":{"shape":"RoomMembershipRole"} + } + }, + "CreateRoomMembershipResponse":{ + "type":"structure", + "members":{ + "RoomMembership":{"shape":"RoomMembership"} + } + }, + "CreateRoomRequest":{ + "type":"structure", + "required":[ + "AccountId", + "Name" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "Name":{"shape":"SensitiveString"}, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "CreateRoomResponse":{ + "type":"structure", + "members":{ + "Room":{"shape":"Room"} + } + }, "CreateVoiceConnectorGroupRequest":{ "type":"structure", "required":["Name"], @@ -1753,6 +2295,25 @@ "members":{ } }, + "DeleteAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "AttendeeId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"attendeeId" + } + } + }, "DeleteEventsConfigurationRequest":{ "type":"structure", "required":[ @@ -1772,6 +2333,17 @@ } } }, + "DeleteMeetingRequest":{ + "type":"structure", + "required":["MeetingId"], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + } + } + }, "DeletePhoneNumberRequest":{ "type":"structure", "required":["PhoneNumberId"], @@ -1779,7 +2351,51 @@ "PhoneNumberId":{ "shape":"String", "location":"uri", - "locationName":"phoneNumberId" + "locationName":"phoneNumberId" + } + } + }, + "DeleteRoomMembershipRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MemberId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + 
"MemberId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"memberId" + } + } + }, + "DeleteRoomRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" } } }, @@ -1959,6 +2575,12 @@ "LambdaFunctionArn":{"shape":"SensitiveString"} } }, + "ExternalUserIdType":{ + "type":"string", + "max":64, + "min":2, + "sensitive":true + }, "ForbiddenException":{ "type":"structure", "members":{ @@ -2002,6 +2624,31 @@ "AccountSettings":{"shape":"AccountSettings"} } }, + "GetAttendeeRequest":{ + "type":"structure", + "required":[ + "MeetingId", + "AttendeeId" + ], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + }, + "AttendeeId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"attendeeId" + } + } + }, + "GetAttendeeResponse":{ + "type":"structure", + "members":{ + "Attendee":{"shape":"Attendee"} + } + }, "GetBotRequest":{ "type":"structure", "required":[ @@ -2059,6 +2706,23 @@ "VoiceConnector":{"shape":"VoiceConnectorSettings"} } }, + "GetMeetingRequest":{ + "type":"structure", + "required":["MeetingId"], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + } + } + }, + "GetMeetingResponse":{ + "type":"structure", + "members":{ + "Meeting":{"shape":"Meeting"} + } + }, "GetPhoneNumberOrderRequest":{ "type":"structure", "required":["PhoneNumberOrderId"], @@ -2100,6 +2764,31 @@ "CallingNameUpdatedTimestamp":{"shape":"Iso8601Timestamp"} } }, + "GetRoomRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + } + } + }, + "GetRoomResponse":{ + "type":"structure", + "members":{ + "Room":{"shape":"Room"} + } + }, "GetUserRequest":{ "type":"structure", "required":[ @@ -2319,6 +3008,12 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "JoinTokenString":{ + "type":"string", + "max":2048, + "min":2, + "sensitive":true + }, "License":{ "type":"string", "enum":[ @@ -2364,6 +3059,34 @@ "NextToken":{"shape":"String"} } }, + "ListAttendeesRequest":{ + "type":"structure", + "required":["MeetingId"], + "members":{ + "MeetingId":{ + "shape":"GuidString", + "location":"uri", + "locationName":"meetingId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"ResultMax", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListAttendeesResponse":{ + "type":"structure", + "members":{ + "Attendees":{"shape":"AttendeeList"}, + "NextToken":{"shape":"String"} + } + }, "ListBotsRequest":{ "type":"structure", "required":["AccountId"], @@ -2392,6 +3115,28 @@ "NextToken":{"shape":"String"} } }, + "ListMeetingsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"ResultMax", + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListMeetingsResponse":{ + "type":"structure", + "members":{ + "Meetings":{"shape":"MeetingList"}, + "NextToken":{"shape":"String"} + } + }, "ListPhoneNumberOrdersRequest":{ "type":"structure", 
"members":{ @@ -2456,6 +3201,75 @@ "NextToken":{"shape":"String"} } }, + "ListRoomMembershipsRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + "MaxResults":{ + "shape":"ResultMax", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRoomMembershipsResponse":{ + "type":"structure", + "members":{ + "RoomMemberships":{"shape":"RoomMembershipList"}, + "NextToken":{"shape":"String"} + } + }, + "ListRoomsRequest":{ + "type":"structure", + "required":["AccountId"], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "MemberId":{ + "shape":"String", + "location":"querystring", + "locationName":"member-id" + }, + "MaxResults":{ + "shape":"ResultMax", + "location":"querystring", + "locationName":"max-results" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + } + } + }, + "ListRoomsResponse":{ + "type":"structure", + "members":{ + "Rooms":{"shape":"RoomList"}, + "NextToken":{"shape":"String"} + } + }, "ListUsersRequest":{ "type":"structure", "required":["AccountId"], @@ -2580,6 +3394,78 @@ "members":{ } }, + "MediaPlacement":{ + "type":"structure", + "members":{ + "AudioHostUrl":{"shape":"UriType"}, + "ScreenDataUrl":{"shape":"UriType"}, + "ScreenSharingUrl":{"shape":"UriType"}, + "ScreenViewingUrl":{"shape":"UriType"}, + "SignalingUrl":{"shape":"UriType"}, + "TurnControlUrl":{"shape":"UriType"} + } + }, + "Meeting":{ + "type":"structure", + "members":{ + "MeetingId":{"shape":"GuidString"}, + "MediaPlacement":{"shape":"MediaPlacement"}, + "MediaRegion":{"shape":"String"} + } + }, + "MeetingList":{ + "type":"list", + "member":{"shape":"Meeting"} + }, + "MeetingNotificationConfiguration":{ + "type":"structure", + "members":{ + "SnsTopicArn":{"shape":"Arn"}, + "SqsQueueArn":{"shape":"Arn"} + } + }, + "Member":{ + "type":"structure", + "members":{ + "MemberId":{"shape":"NonEmptyString"}, + "MemberType":{"shape":"MemberType"}, + "Email":{"shape":"SensitiveString"}, + "FullName":{"shape":"SensitiveString"}, + "AccountId":{"shape":"NonEmptyString"} + } + }, + "MemberError":{ + "type":"structure", + "members":{ + "MemberId":{"shape":"NonEmptyString"}, + "ErrorCode":{"shape":"ErrorCode"}, + "ErrorMessage":{"shape":"String"} + } + }, + "MemberErrorList":{ + "type":"list", + "member":{"shape":"MemberError"} + }, + "MemberType":{ + "type":"string", + "enum":[ + "User", + "Bot", + "Webhook" + ] + }, + "MembershipItem":{ + "type":"structure", + "members":{ + "MemberId":{"shape":"NonEmptyString"}, + "Role":{"shape":"RoomMembershipRole"} + } + }, + "MembershipItemList":{ + "type":"list", + "member":{"shape":"MembershipItem"}, + "max":50 + }, "NonEmptyString":{ "type":"string", "pattern":".*\\S.*" @@ -2999,6 +3885,42 @@ "max":99, "min":1 }, + "Room":{ + "type":"structure", + "members":{ + "RoomId":{"shape":"NonEmptyString"}, + "Name":{"shape":"SensitiveString"}, + "AccountId":{"shape":"NonEmptyString"}, + "CreatedBy":{"shape":"NonEmptyString"}, + "CreatedTimestamp":{"shape":"Iso8601Timestamp"}, + "UpdatedTimestamp":{"shape":"Iso8601Timestamp"} + } + }, + "RoomList":{ + "type":"list", + "member":{"shape":"Room"} + }, + "RoomMembership":{ + 
"type":"structure", + "members":{ + "RoomId":{"shape":"NonEmptyString"}, + "Member":{"shape":"Member"}, + "Role":{"shape":"RoomMembershipRole"}, + "InvitedBy":{"shape":"NonEmptyString"}, + "UpdatedTimestamp":{"shape":"Iso8601Timestamp"} + } + }, + "RoomMembershipList":{ + "type":"list", + "member":{"shape":"RoomMembership"} + }, + "RoomMembershipRole":{ + "type":"string", + "enum":[ + "Administrator", + "Member" + ] + }, "SearchAvailablePhoneNumbersRequest":{ "type":"structure", "members":{ @@ -3263,6 +4185,64 @@ "CallingName":{"shape":"CallingName"} } }, + "UpdateRoomMembershipRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId", + "MemberId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + "MemberId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"memberId" + }, + "Role":{"shape":"RoomMembershipRole"} + } + }, + "UpdateRoomMembershipResponse":{ + "type":"structure", + "members":{ + "RoomMembership":{"shape":"RoomMembership"} + } + }, + "UpdateRoomRequest":{ + "type":"structure", + "required":[ + "AccountId", + "RoomId" + ], + "members":{ + "AccountId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"accountId" + }, + "RoomId":{ + "shape":"NonEmptyString", + "location":"uri", + "locationName":"roomId" + }, + "Name":{"shape":"SensitiveString"} + } + }, + "UpdateRoomResponse":{ + "type":"structure", + "members":{ + "Room":{"shape":"Room"} + } + }, "UpdateUserRequest":{ "type":"structure", "required":[ @@ -3369,6 +4349,10 @@ "VoiceConnector":{"shape":"VoiceConnector"} } }, + "UriType":{ + "type":"string", + "max":4096 + }, "User":{ "type":"structure", "required":["UserId"], diff --git a/models/apis/chime/2018-05-01/docs-2.json b/models/apis/chime/2018-05-01/docs-2.json index c28d9f63071..f6a1f76d14a 100644 --- a/models/apis/chime/2018-05-01/docs-2.json +++ b/models/apis/chime/2018-05-01/docs-2.json @@ -1,23 +1,33 @@ { "version": "2.0", - "service": "

The Amazon Chime API (application programming interface) is designed for administrators to use to perform key tasks, such as creating and managing Amazon Chime accounts and users. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST API

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Control Access to the Amazon Chime Console in the Amazon Chime Administration Guide.

", + "service": "

The Amazon Chime API (application programming interface) is designed for developers to perform key tasks, such as creating and managing Amazon Chime accounts, users, and Voice Connectors. This guide provides detailed information about the Amazon Chime API, including operations, types, inputs and outputs, and error codes. It also includes some server-side API actions to use with the Amazon Chime SDK. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. Each API operation includes links to information about using it with a language-specific AWS SDK or the AWS CLI.

Using an AWS SDK

You don't need to write code to calculate a signature for request authentication. The SDK clients authenticate your requests by using access keys that you provide. For more information about AWS SDKs, see the AWS Developer Center.

Using the AWS CLI

Use your access keys with the AWS CLI to make API calls. For information about setting up the AWS CLI, see Installing the AWS Command Line Interface in the AWS Command Line Interface User Guide. For a list of available Amazon Chime commands, see the Amazon Chime commands in the AWS CLI Command Reference.

Using REST API

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Chime supports signature version 4. For more information, see Signature Version 4 Signing Process in the Amazon Web Services General Reference.

When making REST API calls, use the service name chime and REST endpoint https://service.chime.aws.amazon.com.

Administrative permissions are controlled using AWS Identity and Access Management (IAM). For more information, see Identity and Access Management for Amazon Chime in the Amazon Chime Administration Guide.
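
For the "Using an AWS SDK" case above, the Go v2 client for Amazon Chime is constructed the same way as the other generated clients in this repository. A minimal sketch, assuming the external config loader and the generated chime service package (no request-signing code is needed; the client signs with Signature Version 4 on your behalf):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/chime"
)

func main() {
	// Resolve the Region and access keys from the environment and shared config files.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}

	// chime.New returns a client that signs every request against the chime
	// endpoint; operations are invoked through the generated
	// <Operation>Request(...).Send(ctx) pattern used in the sketches below.
	client := chime.New(cfg)
	_ = client // placeholder: see the operation sketches further down
}
```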

", "operations": { "AssociatePhoneNumberWithUser": "

Associates a phone number with the specified Amazon Chime user.

", "AssociatePhoneNumbersWithVoiceConnector": "

Associates phone numbers with the specified Amazon Chime Voice Connector.

", "AssociatePhoneNumbersWithVoiceConnectorGroup": "

Associates phone numbers with the specified Amazon Chime Voice Connector group.

", + "BatchCreateAttendee": "

Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", + "BatchCreateRoomMembership": "

Adds up to 50 members to a chat room. Members can be either users or bots. The member role designates whether the member is a chat room administrator or a general chat room member.

", "BatchDeletePhoneNumber": "

Moves phone numbers into the Deletion queue. Phone numbers must be disassociated from any users or Amazon Chime Voice Connectors before they can be deleted.

Phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.

", "BatchSuspendUser": "

Suspends up to 50 users from a Team or EnterpriseLWA Amazon Chime account. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Users suspended from a Team account are disassociated from the account, but they can continue to use Amazon Chime as free users. To remove the suspension from suspended Team account users, invite them to the Team account again. You can use the InviteUsers action to do so.

Users suspended from an EnterpriseLWA account are immediately signed out of Amazon Chime and can no longer sign in. To remove the suspension from suspended EnterpriseLWA account users, use the BatchUnsuspendUser action.

To sign out users without suspending them, use the LogoutUser action.

", "BatchUnsuspendUser": "

Removes the suspension from up to 50 previously suspended users for the specified Amazon Chime EnterpriseLWA account. Only users on EnterpriseLWA accounts can be unsuspended using this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

Previously suspended users who are unsuspended using this action are returned to Registered status. Users who are not previously suspended are ignored.

", "BatchUpdatePhoneNumber": "

Updates phone number product types or calling names. You can update one attribute at a time for each UpdatePhoneNumberRequestItem. For example, you can update either the product type or the calling name.

For product types, choose from Amazon Chime Business Calling and Amazon Chime Voice Connector. For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.

", "BatchUpdateUser": "

Updates user details within the UpdateUserRequestItem object for up to 20 users for the specified Amazon Chime account. Currently, only LicenseType updates are supported for this action.

", "CreateAccount": "

Creates an Amazon Chime account under the administrator's AWS account. Only Team account types are currently supported for this action. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

", + "CreateAttendee": "

Creates a new attendee for an active Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "CreateBot": "

Creates a bot for an Amazon Chime Enterprise account.

", + "CreateMeeting": "

Creates a new Amazon Chime SDK meeting in the specified media Region with no initial attendees. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
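
A sketch of how CreateMeeting and CreateAttendee might be called together once this model is generated into the Go client. Operation, input, and field names follow the shapes defined in this model; the token, Region, and external user ID values are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/chime"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}
	client := chime.New(cfg)

	// Create the meeting in a chosen media Region. The ClientRequestToken is
	// an idempotency token (2-64 characters); reuse it when retrying.
	meeting, err := client.CreateMeetingRequest(&chime.CreateMeetingInput{
		ClientRequestToken: aws.String("example-meeting-token"),
		MediaRegion:        aws.String("us-east-1"),
	}).Send(context.TODO())
	if err != nil {
		log.Fatalf("CreateMeeting failed: %v", err)
	}

	// Add one attendee. ExternalUserId links the attendee to an identity
	// managed by the builder application (2-64 characters).
	attendee, err := client.CreateAttendeeRequest(&chime.CreateAttendeeInput{
		MeetingId:      meeting.Meeting.MeetingId,
		ExternalUserId: aws.String("example-app-user-1234"),
	}).Send(context.TODO())
	if err != nil {
		log.Fatalf("CreateAttendee failed: %v", err)
	}

	// The JoinToken on the returned attendee is what lets a client join as
	// this attendee, so hand it to the end-user client over a secure channel.
	log.Printf("meeting %s, attendee %s",
		*meeting.Meeting.MeetingId, *attendee.Attendee.AttendeeId)
}
```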

", "CreatePhoneNumberOrder": "

Creates an order for phone numbers to be provisioned. Choose from Amazon Chime Business Calling and Amazon Chime Voice Connector product types. For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

", + "CreateRoom": "

Creates a chat room for the specified Amazon Chime account.

", + "CreateRoomMembership": "

Adds a member to a chat room. A member can be either a user or a bot. The member role designates whether the member is a chat room administrator or a general chat room member.
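
A similar sketch for the chat room operations, combining CreateRoom and CreateRoomMembership with the Administrator value of the RoomMembershipRole enum. Names follow this model; the account ID, room name, and member ID are placeholders, and the enum constant name assumes the SDK's usual Go naming for enum values:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/chime"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}
	client := chime.New(cfg)

	// Create a chat room in the given Amazon Chime account. The
	// ClientRequestToken makes the call idempotent across retries.
	room, err := client.CreateRoomRequest(&chime.CreateRoomInput{
		AccountId:          aws.String("example-account-id"),
		Name:               aws.String("Project Kickoff"),
		ClientRequestToken: aws.String("example-room-token"),
	}).Send(context.TODO())
	if err != nil {
		log.Fatalf("CreateRoom failed: %v", err)
	}

	// Add a user (or bot) to the room as a chat room administrator.
	membership, err := client.CreateRoomMembershipRequest(&chime.CreateRoomMembershipInput{
		AccountId: aws.String("example-account-id"),
		RoomId:    room.Room.RoomId,
		MemberId:  aws.String("example-user-id"),
		Role:      chime.RoomMembershipRoleAdministrator,
	}).Send(context.TODO())
	if err != nil {
		log.Fatalf("CreateRoomMembership failed: %v", err)
	}

	log.Printf("room %s, member %s",
		*room.Room.RoomId, *membership.RoomMembership.Member.MemberId)
}
```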

", "CreateVoiceConnector": "

Creates an Amazon Chime Voice Connector under the administrator's AWS account. You can choose to create an Amazon Chime Voice Connector in a specific AWS Region.

Enabling CreateVoiceConnectorRequest$RequireEncryption configures your Amazon Chime Voice Connector to use TLS transport for SIP signaling and Secure RTP (SRTP) for media. Inbound calls use TLS transport, and unencrypted outbound calls are blocked.

", "CreateVoiceConnectorGroup": "

Creates an Amazon Chime Voice Connector group under the administrator's AWS account. You can associate up to three existing Amazon Chime Voice Connectors with the Amazon Chime Voice Connector group by including VoiceConnectorItems in the request.

You can include Amazon Chime Voice Connectors from different AWS Regions in your group. This creates a fault-tolerant mechanism for fallback in case of availability events.

", "DeleteAccount": "

Deletes the specified Amazon Chime account. You must suspend all users before deleting a Team account. You can use the BatchSuspendUser action to do so.

For EnterpriseLWA and EnterpriseAD accounts, you must release the claimed domains for your Amazon Chime account before deletion. As soon as you release the domain, all users under that account are suspended.

Deleted accounts appear in your Disabled accounts list for 90 days. To restore a deleted account from your Disabled accounts list, you must contact AWS Support.

After 90 days, deleted accounts are permanently removed from your Disabled accounts list.

", + "DeleteAttendee": "

Deletes an attendee from the specified Amazon Chime SDK meeting and deletes their JoinToken. Attendees are automatically deleted when an Amazon Chime SDK meeting is deleted. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "DeleteEventsConfiguration": "

Deletes the events configuration that allows a bot to receive outgoing events.

", + "DeleteMeeting": "

Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted, its attendees are also deleted and clients can no longer join it. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "DeletePhoneNumber": "

Moves the specified phone number into the Deletion queue. A phone number must be disassociated from any users or Amazon Chime Voice Connectors before it can be deleted.

Deleted phone numbers remain in the Deletion queue for 7 days before they are deleted permanently.

", + "DeleteRoom": "

Deletes a chat room.

", + "DeleteRoomMembership": "

Removes a member from a chat room.

", "DeleteVoiceConnector": "

Deletes the specified Amazon Chime Voice Connector. Any phone numbers associated with the Amazon Chime Voice Connector must be disassociated from it before it can be deleted.

", "DeleteVoiceConnectorGroup": "

Deletes the specified Amazon Chime Voice Connector group. Any VoiceConnectorItems and phone numbers associated with the group must be removed before it can be deleted.

", "DeleteVoiceConnectorOrigination": "

Deletes the origination settings for the specified Amazon Chime Voice Connector.

", @@ -29,26 +39,33 @@ "DisassociatePhoneNumbersFromVoiceConnectorGroup": "

Disassociates the specified phone numbers from the specified Amazon Chime Voice Connector group.

", "GetAccount": "

Retrieves details for the specified Amazon Chime account, such as account type and supported licenses.

", "GetAccountSettings": "

Retrieves account settings for the specified Amazon Chime account ID, such as remote control and dial out settings. For more information about these settings, see Use the Policies Page in the Amazon Chime Administration Guide.

", + "GetAttendee": "

Gets the Amazon Chime SDK attendee details for a specified meeting ID and attendee ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "GetBot": "

Retrieves details for the specified bot, such as bot email address, bot type, status, and display name.

", "GetEventsConfiguration": "

Gets details for an events configuration that allows a bot to receive outgoing events, such as an HTTPS endpoint or Lambda function ARN.

", "GetGlobalSettings": "

Retrieves global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

", + "GetMeeting": "

Gets the Amazon Chime SDK meeting details for the specified meeting ID. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "GetPhoneNumber": "

Retrieves details for the specified phone number ID, such as associations, capabilities, and product type.

", "GetPhoneNumberOrder": "

Retrieves details for the specified phone number order, such as order creation timestamp, phone numbers in E.164 format, product type, and order status.

", "GetPhoneNumberSettings": "

Retrieves the phone number settings for the administrator's AWS account, such as the default outbound calling name.

", + "GetRoom": "

Retrieves room details, such as name.

", "GetUser": "

Retrieves details for the specified user ID, such as primary email address, license type, and personal meeting PIN.

To retrieve user details with an email address instead of a user ID, use the ListUsers action, and then filter by email address.

", "GetUserSettings": "

Retrieves settings for the specified user ID, such as any associated phone number settings.

", "GetVoiceConnector": "

Retrieves details for the specified Amazon Chime Voice Connector, such as timestamps, name, outbound host, and encryption requirements.

", "GetVoiceConnectorGroup": "

Retrieves details for the specified Amazon Chime Voice Connector group, such as timestamps, name, and associated VoiceConnectorItems.

", "GetVoiceConnectorLoggingConfiguration": "

Retrieves the logging configuration details for the specified Amazon Chime Voice Connector. Shows whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.

", "GetVoiceConnectorOrigination": "

Retrieves origination setting details for the specified Amazon Chime Voice Connector.

", - "GetVoiceConnectorStreamingConfiguration": "

Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, in hours.

", + "GetVoiceConnectorStreamingConfiguration": "

Retrieves the streaming configuration details for the specified Amazon Chime Voice Connector. Shows whether media streaming is enabled for sending to Amazon Kinesis. It also shows the retention period, in hours, for the Amazon Kinesis data.

", "GetVoiceConnectorTermination": "

Retrieves termination setting details for the specified Amazon Chime Voice Connector.

", "GetVoiceConnectorTerminationHealth": "

Retrieves information about the last time a SIP OPTIONS ping was received from your SIP infrastructure for the specified Amazon Chime Voice Connector.

", - "InviteUsers": "

Sends email invites to as many as 50 users, inviting them to the specified Amazon Chime Team account. Only Team account types are currently supported for this action.

", + "InviteUsers": "

Sends email to a maximum of 50 users, inviting them to the specified Amazon Chime Team account. Only Team account types are currently supported for this action.

", "ListAccounts": "

Lists the Amazon Chime accounts under the administrator's AWS account. You can filter accounts by account name prefix. To find out which Amazon Chime account a user belongs to, you can filter by the user's email address, which returns one account result.

", + "ListAttendees": "

Lists the attendees for the specified Amazon Chime SDK meeting. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.

", "ListBots": "

Lists the bots associated with the administrator's Amazon Chime Enterprise account ID.

", + "ListMeetings": "

Lists up to 100 active Amazon Chime SDK meetings. For more information about the Amazon Chime SDK, see Using the Amazon Chime SDK in the Amazon Chime Developer Guide.
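
Because ListMeetings is paginated through MaxResults and NextToken, a caller typically loops until NextToken comes back empty. A sketch under the same assumptions as the earlier examples:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/chime"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}
	client := chime.New(cfg)

	// Page through the active Amazon Chime SDK meetings, 25 at a time.
	input := &chime.ListMeetingsInput{MaxResults: aws.Int64(25)}
	for {
		page, err := client.ListMeetingsRequest(input).Send(context.TODO())
		if err != nil {
			log.Fatalf("ListMeetings failed: %v", err)
		}
		for _, m := range page.Meetings {
			log.Printf("meeting %s", *m.MeetingId)
		}
		if page.NextToken == nil {
			break
		}
		// Continue from where the previous page left off.
		input.NextToken = page.NextToken
	}
}
```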

", "ListPhoneNumberOrders": "

Lists the phone number orders for the administrator's Amazon Chime account.

", "ListPhoneNumbers": "

Lists the phone numbers for the specified Amazon Chime account, Amazon Chime user, Amazon Chime Voice Connector, or Amazon Chime Voice Connector group.

", + "ListRoomMemberships": "

Lists the membership details for the specified room, such as member IDs, member email addresses, and member names.

", + "ListRooms": "

Lists the room details for the specified Amazon Chime account. Optionally, filter the results by a member ID (user ID or bot ID) to see a list of rooms that the member belongs to.

", "ListUsers": "

Lists the users that belong to the specified Amazon Chime account. You can specify an email address to list only the user that the email address belongs to.

", "ListVoiceConnectorGroups": "

Lists the Amazon Chime Voice Connector groups for the administrator's AWS account.

", "ListVoiceConnectorTerminationCredentials": "

Lists the SIP credentials for the specified Amazon Chime Voice Connector.

", @@ -57,7 +74,7 @@ "PutEventsConfiguration": "

Creates an events configuration that allows a bot to receive outgoing events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda function ARN. For more information, see Bot.

", "PutVoiceConnectorLoggingConfiguration": "

Adds a logging configuration for the specified Amazon Chime Voice Connector. The logging configuration specifies whether SIP message logs are enabled for sending to Amazon CloudWatch Logs.

", "PutVoiceConnectorOrigination": "

Adds origination settings for the specified Amazon Chime Voice Connector.

", - "PutVoiceConnectorStreamingConfiguration": "

Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis, and sets the retention period for the Amazon Kinesis data, in hours.

", + "PutVoiceConnectorStreamingConfiguration": "

Adds a streaming configuration for the specified Amazon Chime Voice Connector. The streaming configuration specifies whether media streaming is enabled for sending to Amazon Kinesis. It also sets the retention period, in hours, for the Amazon Kinesis data.

", "PutVoiceConnectorTermination": "

Adds termination settings for the specified Amazon Chime Voice Connector.

", "PutVoiceConnectorTerminationCredentials": "

Adds termination SIP credentials for the specified Amazon Chime Voice Connector.

", "RegenerateSecurityToken": "

Regenerates the security token for a bot.

", @@ -69,7 +86,9 @@ "UpdateBot": "

Updates the status of the specified bot, such as starting or stopping the bot from running in your Amazon Chime Enterprise account.

", "UpdateGlobalSettings": "

Updates global settings for the administrator's AWS account, such as Amazon Chime Business Calling and Amazon Chime Voice Connector settings.

", "UpdatePhoneNumber": "

Updates phone number details, such as product type or calling name, for the specified phone number ID. You can update one phone number detail at a time. For example, you can update either the product type or the calling name in one action.

For toll-free numbers, you must use the Amazon Chime Voice Connector product type.

Updates to outbound calling names can take up to 72 hours to complete. Pending updates to outbound calling names must be complete before you can request another update.

", - "UpdatePhoneNumberSettings": "

Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to be updated.

", + "UpdatePhoneNumberSettings": "

Updates the phone number settings for the administrator's AWS account, such as the default outbound calling name. You can update the default outbound calling name once every seven days. Outbound calling names can take up to 72 hours to update.

", + "UpdateRoom": "

Updates room details, such as the room name.

", + "UpdateRoomMembership": "

Updates room membership details, such as member role. The member role designates whether the member is a chat room administrator or a general chat room member. Member role can only be updated for user IDs.

", "UpdateUser": "

Updates user details for a specified user ID. Currently, only LicenseType updates are supported for this action.

", "UpdateUserSettings": "

Updates the settings for the specified user, such as phone number settings.

", "UpdateVoiceConnector": "

Updates details for the specified Amazon Chime Voice Connector.

", @@ -117,6 +136,13 @@ "Account$AccountType": "

The Amazon Chime account type. For more information about different account types, see Managing Your Amazon Chime Accounts in the Amazon Chime Administration Guide.

" } }, + "Arn": { + "base": null, + "refs": { + "MeetingNotificationConfiguration$SnsTopicArn": "

The SNS topic ARN.

", + "MeetingNotificationConfiguration$SqsQueueArn": "

The SQS queue ARN.

" + } + }, "AssociatePhoneNumberWithUserRequest": { "base": null, "refs": { @@ -147,11 +173,52 @@ "refs": { } }, + "Attendee": { + "base": "

An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId and JoinToken. The JoinToken allows a client to authenticate and join as the specified attendee. The JoinToken expires when the meeting ends or when DeleteAttendee is called. After that, the attendee is unable to join the meeting.

We recommend securely transferring each JoinToken from your server application to the client so that no other client has access to the token except for the one authorized to represent the attendee.

", + "refs": { + "AttendeeList$member": null, + "CreateAttendeeResponse$Attendee": "

The attendee information, including attendee ID and join token.

", + "GetAttendeeResponse$Attendee": "

The Amazon Chime SDK attendee information.

" + } + }, + "AttendeeList": { + "base": null, + "refs": { + "BatchCreateAttendeeResponse$Attendees": "

The attendee information, including attendee IDs and join tokens.

", + "ListAttendeesResponse$Attendees": "

The Amazon Chime SDK attendee information.

" + } + }, "BadRequestException": { "base": "

The input parameters don't match the service's restrictions.

", "refs": { } }, + "BatchCreateAttendeeErrorList": { + "base": null, + "refs": { + "BatchCreateAttendeeResponse$Errors": "

If the action fails for one or more of the attendees in the request, a list of the attendees is returned, along with error codes and error messages.

" + } + }, + "BatchCreateAttendeeRequest": { + "base": null, + "refs": { + } + }, + "BatchCreateAttendeeResponse": { + "base": null, + "refs": { + } + }, + "BatchCreateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "BatchCreateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, "BatchDeletePhoneNumberRequest": { "base": null, "refs": { @@ -276,6 +343,13 @@ "Termination$CallingRegions": "

The countries to which calls are allowed, in ISO 3166-1 alpha-2 format. Required.

" } }, + "ClientRequestToken": { + "base": null, + "refs": { + "CreateMeetingRequest$ClientRequestToken": "

The unique identifier for the client request. Use a different token for different meetings.

", + "CreateRoomRequest$ClientRequestToken": "

The idempotency token for the request.

" + } + }, "ConflictException": { "base": "

The request could not be processed because of a conflict in the current state of the resource.

", "refs": { @@ -297,6 +371,34 @@ "refs": { } }, + "CreateAttendeeError": { + "base": "

The list of errors returned when errors are encountered during the BatchCreateAttendee and CreateAttendee actions. This includes external user IDs, error codes, and error messages.

", + "refs": { + "BatchCreateAttendeeErrorList$member": null + } + }, + "CreateAttendeeRequest": { + "base": null, + "refs": { + } + }, + "CreateAttendeeRequestItem": { + "base": "

The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee action.

", + "refs": { + "CreateAttendeeRequestItemList$member": null + } + }, + "CreateAttendeeRequestItemList": { + "base": null, + "refs": { + "BatchCreateAttendeeRequest$Attendees": "

The request containing the attendees to create.

" + } + }, + "CreateAttendeeResponse": { + "base": null, + "refs": { + } + }, "CreateBotRequest": { "base": null, "refs": { @@ -307,6 +409,16 @@ "refs": { } }, + "CreateMeetingRequest": { + "base": null, + "refs": { + } + }, + "CreateMeetingResponse": { + "base": null, + "refs": { + } + }, "CreatePhoneNumberOrderRequest": { "base": null, "refs": { @@ -317,6 +429,26 @@ "refs": { } }, + "CreateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "CreateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, + "CreateRoomRequest": { + "base": null, + "refs": { + } + }, + "CreateRoomResponse": { + "base": null, + "refs": { + } + }, "CreateVoiceConnectorGroupRequest": { "base": null, "refs": { @@ -352,7 +484,7 @@ "DataRetentionInHours": { "base": null, "refs": { - "StreamingConfiguration$DataRetentionInHours": "

The retention period for the Amazon Kinesis data, in hours.

" + "StreamingConfiguration$DataRetentionInHours": "

The retention period, in hours, for the Amazon Kinesis data.

" } }, "DeleteAccountRequest": { @@ -365,16 +497,36 @@ "refs": { } }, + "DeleteAttendeeRequest": { + "base": null, + "refs": { + } + }, "DeleteEventsConfigurationRequest": { "base": null, "refs": { } }, + "DeleteMeetingRequest": { + "base": null, + "refs": { + } + }, "DeletePhoneNumberRequest": { "base": null, "refs": { } }, + "DeleteRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "DeleteRoomRequest": { + "base": null, + "refs": { + } + }, "DeleteVoiceConnectorGroupRequest": { "base": null, "refs": { @@ -479,6 +631,7 @@ "BadRequestException$Code": null, "ConflictException$Code": null, "ForbiddenException$Code": null, + "MemberError$ErrorCode": "

The error code.

", "NotFoundException$Code": null, "PhoneNumberError$ErrorCode": "

The error code.

", "ResourceLimitExceededException$Code": null, @@ -497,6 +650,16 @@ "PutEventsConfigurationResponse$EventsConfiguration": null } }, + "ExternalUserIdType": { + "base": null, + "refs": { + "Attendee$ExternalUserId": "

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

", + "CreateAttendeeError$ExternalUserId": "

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

", + "CreateAttendeeRequest$ExternalUserId": "

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

", + "CreateAttendeeRequestItem$ExternalUserId": "

The Amazon Chime SDK external user ID. Links the attendee to an identity managed by a builder application.

", + "CreateMeetingRequest$MeetingHostId": "

Reserved.

" + } + }, "ForbiddenException": { "base": "

The client is permanently forbidden from making the request. For example, when a user tries to create an account from an unsupported Region.

", "refs": { @@ -522,6 +685,16 @@ "refs": { } }, + "GetAttendeeRequest": { + "base": null, + "refs": { + } + }, + "GetAttendeeResponse": { + "base": null, + "refs": { + } + }, "GetBotRequest": { "base": null, "refs": { @@ -547,6 +720,16 @@ "refs": { } }, + "GetMeetingRequest": { + "base": null, + "refs": { + } + }, + "GetMeetingResponse": { + "base": null, + "refs": { + } + }, "GetPhoneNumberOrderRequest": { "base": null, "refs": { @@ -572,6 +755,16 @@ "refs": { } }, + "GetRoomRequest": { + "base": null, + "refs": { + } + }, + "GetRoomResponse": { + "base": null, + "refs": { + } + }, "GetUserRequest": { "base": null, "refs": { @@ -665,7 +858,18 @@ "GuidString": { "base": null, "refs": { + "Attendee$AttendeeId": "

The Amazon Chime SDK attendee ID.

", + "BatchCreateAttendeeRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "CreateAttendeeRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "DeleteAttendeeRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "DeleteAttendeeRequest$AttendeeId": "

The Amazon Chime SDK attendee ID.

", + "DeleteMeetingRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "GetAttendeeRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "GetAttendeeRequest$AttendeeId": "

The Amazon Chime SDK attendee ID.

", + "GetMeetingRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", "GetPhoneNumberOrderRequest$PhoneNumberOrderId": "

The ID for the phone number order.

", + "ListAttendeesRequest$MeetingId": "

The Amazon Chime SDK meeting ID.

", + "Meeting$MeetingId": "

The Amazon Chime SDK meeting ID.

", "PhoneNumberOrder$PhoneNumberOrderId": "

The phone number order ID.

" } }, @@ -678,7 +882,7 @@ "InviteList": { "base": null, "refs": { - "InviteUsersResponse$Invites": "

The invite details.

" + "InviteUsersResponse$Invites": "

The email invitation details.

" } }, "InviteStatus": { @@ -711,6 +915,9 @@ "PhoneNumberAssociation$AssociatedTimestamp": "

The timestamp of the phone number association, in ISO 8601 format.

", "PhoneNumberOrder$CreatedTimestamp": "

The phone number order creation timestamp, in ISO 8601 format.

", "PhoneNumberOrder$UpdatedTimestamp": "

The updated phone number order timestamp, in ISO 8601 format.

", + "Room$CreatedTimestamp": "

The room creation timestamp, in ISO 8601 format.

", + "Room$UpdatedTimestamp": "

The room update timestamp, in ISO 8601 format.

", + "RoomMembership$UpdatedTimestamp": "

The room membership update timestamp, in ISO 8601 format.

", "TerminationHealth$Timestamp": "

The timestamp, in ISO 8601 format.

", "User$RegisteredOn": "

Date and time when the user is registered, in ISO 8601 format.

", "User$InvitedOn": "

Date and time when the user is invited to the Amazon Chime account, in ISO 8601 format.

", @@ -720,6 +927,12 @@ "VoiceConnectorGroup$UpdatedTimestamp": "

The updated Amazon Chime Voice Connector group timestamp, in ISO 8601 format.

" } }, + "JoinTokenString": { + "base": null, + "refs": { + "Attendee$JoinToken": "

The join token used by the Amazon Chime SDK attendee.

" + } + }, "License": { "base": null, "refs": { @@ -746,6 +959,16 @@ "refs": { } }, + "ListAttendeesRequest": { + "base": null, + "refs": { + } + }, + "ListAttendeesResponse": { + "base": null, + "refs": { + } + }, "ListBotsRequest": { "base": null, "refs": { @@ -756,6 +979,16 @@ "refs": { } }, + "ListMeetingsRequest": { + "base": null, + "refs": { + } + }, + "ListMeetingsResponse": { + "base": null, + "refs": { + } + }, "ListPhoneNumberOrdersRequest": { "base": null, "refs": { @@ -776,6 +1009,26 @@ "refs": { } }, + "ListRoomMembershipsRequest": { + "base": null, + "refs": { + } + }, + "ListRoomMembershipsResponse": { + "base": null, + "refs": { + } + }, + "ListRoomsRequest": { + "base": null, + "refs": { + } + }, + "ListRoomsResponse": { + "base": null, + "refs": { + } + }, "ListUsersRequest": { "base": null, "refs": { @@ -834,19 +1087,92 @@ "refs": { } }, + "MediaPlacement": { + "base": "

A set of endpoints used by clients to connect to the media service group for an Amazon Chime SDK meeting.

", + "refs": { + "Meeting$MediaPlacement": "

The media placement for the meeting.

" + } + }, + "Meeting": { + "base": "

A meeting created using the Amazon Chime SDK.

", + "refs": { + "CreateMeetingResponse$Meeting": "

The meeting information, including the meeting ID and MediaPlacement.

", + "GetMeetingResponse$Meeting": "

The Amazon Chime SDK meeting information.

", + "MeetingList$member": null + } + }, + "MeetingList": { + "base": null, + "refs": { + "ListMeetingsResponse$Meetings": "

The Amazon Chime SDK meeting information.

" + } + }, + "MeetingNotificationConfiguration": { + "base": "

The configuration for resource targets to receive notifications when Amazon Chime SDK meeting and attendee events occur.

", + "refs": { + "CreateMeetingRequest$NotificationsConfiguration": "

The configuration for resource targets to receive notifications when meeting and attendee events occur.

" + } + }, + "Member": { + "base": "

The member details, such as email address, name, member ID, and member type.

", + "refs": { + "RoomMembership$Member": null + } + }, + "MemberError": { + "base": "

The list of errors returned when a member action results in an error.

", + "refs": { + "MemberErrorList$member": null + } + }, + "MemberErrorList": { + "base": null, + "refs": { + "BatchCreateRoomMembershipResponse$Errors": "

If the action fails for one or more of the member IDs in the request, a list of the member IDs is returned, along with error codes and error messages.

" + } + }, + "MemberType": { + "base": null, + "refs": { + "Member$MemberType": "

The member type.

" + } + }, + "MembershipItem": { + "base": "

Membership details, such as member ID and member role.

", + "refs": { + "MembershipItemList$member": null + } + }, + "MembershipItemList": { + "base": null, + "refs": { + "BatchCreateRoomMembershipRequest$MembershipItemList": "

The list of membership items.

" + } + }, "NonEmptyString": { "base": null, "refs": { "AssociatePhoneNumbersWithVoiceConnectorGroupRequest$VoiceConnectorGroupId": "

The Amazon Chime Voice Connector group ID.

", "AssociatePhoneNumbersWithVoiceConnectorRequest$VoiceConnectorId": "

The Amazon Chime Voice Connector ID.

", + "BatchCreateRoomMembershipRequest$AccountId": "

The Amazon Chime account ID.

", + "BatchCreateRoomMembershipRequest$RoomId": "

The room ID.

", "BatchSuspendUserRequest$AccountId": "

The Amazon Chime account ID.

", "BatchUnsuspendUserRequest$AccountId": "

The Amazon Chime account ID.

", "BatchUpdateUserRequest$AccountId": "

The Amazon Chime account ID.

", "CreateBotRequest$AccountId": "

The Amazon Chime account ID.

", "CreateBotRequest$Domain": "

The domain of the Amazon Chime Enterprise account.

", + "CreateRoomMembershipRequest$AccountId": "

The Amazon Chime account ID.

", + "CreateRoomMembershipRequest$RoomId": "

The room ID.

", + "CreateRoomMembershipRequest$MemberId": "

The Amazon Chime member ID (user ID or bot ID).

", + "CreateRoomRequest$AccountId": "

The Amazon Chime account ID.

", "DeleteAccountRequest$AccountId": "

The Amazon Chime account ID.

", "DeleteEventsConfigurationRequest$AccountId": "

The Amazon Chime account ID.

", "DeleteEventsConfigurationRequest$BotId": "

The bot ID.

", + "DeleteRoomMembershipRequest$AccountId": "

The Amazon Chime account ID.

", + "DeleteRoomMembershipRequest$RoomId": "

The room ID.

", + "DeleteRoomMembershipRequest$MemberId": "

The member ID (user ID or bot ID).

", + "DeleteRoomRequest$AccountId": "

The Amazon Chime account ID.

", + "DeleteRoomRequest$RoomId": "

The chat room ID.

", "DeleteVoiceConnectorGroupRequest$VoiceConnectorGroupId": "

The Amazon Chime Voice Connector group ID.

", "DeleteVoiceConnectorOriginationRequest$VoiceConnectorId": "

The Amazon Chime Voice Connector ID.

", "DeleteVoiceConnectorRequest$VoiceConnectorId": "

The Amazon Chime Voice Connector ID.

", @@ -861,6 +1187,8 @@ "GetBotRequest$BotId": "

The bot ID.

", "GetEventsConfigurationRequest$AccountId": "

The Amazon Chime account ID.

", "GetEventsConfigurationRequest$BotId": "

The bot ID.

", + "GetRoomRequest$AccountId": "

The Amazon Chime account ID.

", + "GetRoomRequest$RoomId": "

The room ID.

", "GetUserRequest$AccountId": "

The Amazon Chime account ID.

", "GetUserRequest$UserId": "

The user ID.

", "GetVoiceConnectorGroupRequest$VoiceConnectorGroupId": "

The Amazon Chime Voice Connector group ID.

", @@ -872,10 +1200,17 @@ "GetVoiceConnectorTerminationRequest$VoiceConnectorId": "

The Amazon Chime Voice Connector ID.

", "InviteUsersRequest$AccountId": "

The Amazon Chime account ID.

", "ListBotsRequest$AccountId": "

The Amazon Chime account ID.

", + "ListRoomMembershipsRequest$AccountId": "

The Amazon Chime account ID.

", + "ListRoomMembershipsRequest$RoomId": "

The room ID.

", + "ListRoomsRequest$AccountId": "

The Amazon Chime account ID.

", "ListUsersRequest$AccountId": "

The Amazon Chime account ID.

", "ListVoiceConnectorTerminationCredentialsRequest$VoiceConnectorId": "

The Amazon Chime Voice Connector ID.

", "LogoutUserRequest$AccountId": "

The Amazon Chime account ID.

", "LogoutUserRequest$UserId": "

The user ID.

", + "Member$MemberId": "

The member ID (user ID or bot ID).

", + "Member$AccountId": "

The Amazon Chime account ID.

", + "MemberError$MemberId": "

The member ID.

", + "MembershipItem$MemberId": "

The member ID.

", "PhoneNumberError$PhoneNumberId": "

The phone number ID for which the action failed.

", "PutEventsConfigurationRequest$AccountId": "

The Amazon Chime account ID.

", "PutEventsConfigurationRequest$BotId": "

The bot ID.

", @@ -889,11 +1224,21 @@ "ResetPersonalPINRequest$AccountId": "

The Amazon Chime account ID.

", "ResetPersonalPINRequest$UserId": "

The user ID.

", "RestorePhoneNumberRequest$PhoneNumberId": "

The phone number ID.

", + "Room$RoomId": "

The room ID.

", + "Room$AccountId": "

The Amazon Chime account ID.

", + "Room$CreatedBy": "

The identifier of the room creator.

", + "RoomMembership$RoomId": "

The room ID.

", + "RoomMembership$InvitedBy": "

The identifier of the user that invited the room member.

", "UpdateAccountRequest$AccountId": "

The Amazon Chime account ID.

", "UpdateAccountSettingsRequest$AccountId": "

The Amazon Chime account ID.

", "UpdateBotRequest$AccountId": "

The Amazon Chime account ID.

", "UpdateBotRequest$BotId": "

The bot ID.

", "UpdatePhoneNumberRequestItem$PhoneNumberId": "

The phone number ID to update.

", + "UpdateRoomMembershipRequest$AccountId": "

The Amazon Chime account ID.

", + "UpdateRoomMembershipRequest$RoomId": "

The room ID.

", + "UpdateRoomMembershipRequest$MemberId": "

The member ID.

", + "UpdateRoomRequest$AccountId": "

The Amazon Chime account ID.

", + "UpdateRoomRequest$RoomId": "

The room ID.

", "UpdateUserRequest$AccountId": "

The Amazon Chime account ID.

", "UpdateUserRequest$UserId": "

The user ID.

", "UpdateUserRequestItem$UserId": "

The user ID.

", @@ -1207,13 +1552,55 @@ "ResultMax": { "base": null, "refs": { - "ListBotsRequest$MaxResults": "

The maximum number of results to return in a single call. Default is 10.

", + "ListAttendeesRequest$MaxResults": "

The maximum number of results to return in a single call.

", + "ListBotsRequest$MaxResults": "

The maximum number of results to return in a single call. The default is 10.

", + "ListMeetingsRequest$MaxResults": "

The maximum number of results to return in a single call.

", "ListPhoneNumberOrdersRequest$MaxResults": "

The maximum number of results to return in a single call.

", "ListPhoneNumbersRequest$MaxResults": "

The maximum number of results to return in a single call.

", + "ListRoomMembershipsRequest$MaxResults": "

The maximum number of results to return in a single call.

", + "ListRoomsRequest$MaxResults": "

The maximum number of results to return in a single call.

", "ListVoiceConnectorGroupsRequest$MaxResults": "

The maximum number of results to return in a single call.

", "ListVoiceConnectorsRequest$MaxResults": "

The maximum number of results to return in a single call.

" } }, + "Room": { + "base": "

The Amazon Chime chat room details.

", + "refs": { + "CreateRoomResponse$Room": "

The room details.

", + "GetRoomResponse$Room": "

The room details.

", + "RoomList$member": null, + "UpdateRoomResponse$Room": "

The room details.

" + } + }, + "RoomList": { + "base": null, + "refs": { + "ListRoomsResponse$Rooms": "

The room details.

" + } + }, + "RoomMembership": { + "base": "

The room membership details.

", + "refs": { + "CreateRoomMembershipResponse$RoomMembership": "

The room membership details.

", + "RoomMembershipList$member": null, + "UpdateRoomMembershipResponse$RoomMembership": "

The room membership details.

" + } + }, + "RoomMembershipList": { + "base": null, + "refs": { + "ListRoomMembershipsResponse$RoomMemberships": "

The room membership details.

" + } + }, + "RoomMembershipRole": { + "base": null, + "refs": { + "CreateRoomMembershipRequest$Role": "

The role of the member.

", + "MembershipItem$Role": "

The member role.

", + "RoomMembership$Role": "

The membership role.

", + "UpdateRoomMembershipRequest$Role": "

The role of the member.

" + } + }, "SearchAvailablePhoneNumbersRequest": { "base": null, "refs": { @@ -1231,13 +1618,18 @@ "Bot$BotEmail": "

The bot email address.

", "Bot$SecurityToken": "

The security token used to authenticate Amazon Chime with the outgoing event endpoint.

", "CreateBotRequest$DisplayName": "

The bot display name.

", + "CreateRoomRequest$Name": "

The room name.

", "Credential$Username": "

The RFC2617 compliant user name associated with the SIP credentials, in US-ASCII format.

", "Credential$Password": "

The RFC2617 compliant password associated with the SIP credentials, in US-ASCII format.

", "EventsConfiguration$OutboundEventsHTTPSEndpoint": "

HTTPS endpoint that allows a bot to receive outgoing events.

", "EventsConfiguration$LambdaFunctionArn": "

Lambda function ARN that allows a bot to receive outgoing events.

", + "Member$Email": "

The member email address.

", + "Member$FullName": "

The member name.

", "PutEventsConfigurationRequest$OutboundEventsHTTPSEndpoint": "

HTTPS endpoint that allows the bot to receive outgoing events.

", "PutEventsConfigurationRequest$LambdaFunctionArn": "

Lambda function ARN that allows the bot to receive outgoing events.

", + "Room$Name": "

The room name.

", "SensitiveStringList$member": null, + "UpdateRoomRequest$Name": "

The room name.

", "User$PrimaryProvisionedNumber": "

The primary phone number associated with the user.

", "User$DisplayName": "

The display name of the user.

" } @@ -1281,6 +1673,9 @@ "Bot$UserId": "

The unique ID for the bot user.

", "BusinessCallingSettings$CdrBucket": "

The Amazon S3 bucket designated for call detail record storage.

", "ConflictException$Message": null, + "CreateAttendeeError$ErrorCode": "

The error code.

", + "CreateAttendeeError$ErrorMessage": "

The error message.

", + "CreateMeetingRequest$MediaRegion": "

The Region in which to create the meeting. Available values: us-east-1, us-west-2.

", "DeletePhoneNumberRequest$PhoneNumberId": "

The phone number ID.

", "DisassociatePhoneNumberFromUserRequest$AccountId": "

The Amazon Chime account ID.

", "DisassociatePhoneNumberFromUserRequest$UserId": "

The user ID.

", @@ -1292,19 +1687,30 @@ "Invite$InviteId": "

The invite ID.

", "ListAccountsRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListAccountsResponse$NextToken": "

The token to use to retrieve the next page of results.

", + "ListAttendeesRequest$NextToken": "

The token to use to retrieve the next page of results.

", + "ListAttendeesResponse$NextToken": "

The token to use to retrieve the next page of results.

", "ListBotsRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListBotsResponse$NextToken": "

The token to use to retrieve the next page of results.

", + "ListMeetingsRequest$NextToken": "

The token to use to retrieve the next page of results.

", + "ListMeetingsResponse$NextToken": "

The token to use to retrieve the next page of results.

", "ListPhoneNumberOrdersRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListPhoneNumberOrdersResponse$NextToken": "

The token to use to retrieve the next page of results.

", "ListPhoneNumbersRequest$FilterValue": "

The value to use for the filter.

", "ListPhoneNumbersRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListPhoneNumbersResponse$NextToken": "

The token to use to retrieve the next page of results.

", + "ListRoomMembershipsRequest$NextToken": "

The token to use to retrieve the next page of results.

", + "ListRoomMembershipsResponse$NextToken": "

The token to use to retrieve the next page of results.

", + "ListRoomsRequest$MemberId": "

The member ID (user ID or bot ID).

", + "ListRoomsRequest$NextToken": "

The token to use to retrieve the next page of results.

", + "ListRoomsResponse$NextToken": "

The token to use to retrieve the next page of results.
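To make the NextToken/MaxResults contract concrete for the new ListRooms operation, here is a minimal paging sketch using the v0.x request/Send pattern; the ListRoomsRequest method and field names are assumed from the generated model, and the account ID is a placeholder.

```go
// Minimal paging sketch for ListRooms: keep calling until NextToken comes back nil.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/chime"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := chime.New(cfg)

	input := &chime.ListRoomsInput{
		AccountId:  aws.String("example-account-id"), // placeholder
		MaxResults: aws.Int64(10),
	}
	for {
		page, err := svc.ListRoomsRequest(input).Send(context.TODO())
		if err != nil {
			panic(err)
		}
		for _, room := range page.Rooms {
			if room.Name != nil {
				fmt.Println(*room.Name)
			}
		}
		// A nil NextToken means there are no more pages to fetch.
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}
```

The ListRooms and ListRoomMemberships entries added to the Chime paginators file later in this change expose these same NextToken/MaxResults fields through the SDK's paginator support.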

", "ListUsersRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListUsersResponse$NextToken": "

The token to use to retrieve the next page of results.

", "ListVoiceConnectorGroupsRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListVoiceConnectorGroupsResponse$NextToken": "

The token to use to retrieve the next page of results.

", "ListVoiceConnectorsRequest$NextToken": "

The token to use to retrieve the next page of results.

", "ListVoiceConnectorsResponse$NextToken": "

The token to use to retrieve the next page of results.

", + "Meeting$MediaRegion": "

The Region in which to create the meeting. Available values: us-east-1, us-west-2.

", + "MemberError$ErrorMessage": "

The error message.

", "NonEmptyStringList$member": null, "NotFoundException$Message": null, "OriginationRoute$Host": "

The FQDN or IP address to contact for origination traffic.

", @@ -1444,6 +1850,26 @@ "refs": { } }, + "UpdateRoomMembershipRequest": { + "base": null, + "refs": { + } + }, + "UpdateRoomMembershipResponse": { + "base": null, + "refs": { + } + }, + "UpdateRoomRequest": { + "base": null, + "refs": { + } + }, + "UpdateRoomResponse": { + "base": null, + "refs": { + } + }, "UpdateUserRequest": { "base": null, "refs": { @@ -1491,6 +1917,17 @@ "refs": { } }, + "UriType": { + "base": null, + "refs": { + "MediaPlacement$AudioHostUrl": "

The audio host URL.

", + "MediaPlacement$ScreenDataUrl": "

The screen data URL.

", + "MediaPlacement$ScreenSharingUrl": "

The screen sharing URL.

", + "MediaPlacement$ScreenViewingUrl": "

The screen viewing URL.

", + "MediaPlacement$SignalingUrl": "

The signaling URL.

", + "MediaPlacement$TurnControlUrl": "

The turn control URL.

" + } + }, "User": { "base": "

The user on the Amazon Chime account.

", "refs": { @@ -1503,7 +1940,7 @@ "UserEmailList": { "base": null, "refs": { - "InviteUsersRequest$UserEmailList": "

The user email addresses to which to send the invite.

" + "InviteUsersRequest$UserEmailList": "

The user email addresses to which to send the email invitation.

" } }, "UserError": { diff --git a/models/apis/chime/2018-05-01/paginators-1.json b/models/apis/chime/2018-05-01/paginators-1.json index 1a3a06e0cfe..7d55169a037 100644 --- a/models/apis/chime/2018-05-01/paginators-1.json +++ b/models/apis/chime/2018-05-01/paginators-1.json @@ -5,11 +5,21 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListAttendees": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListBots": { "input_token": "NextToken", "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListMeetings": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListPhoneNumberOrders": { "input_token": "NextToken", "output_token": "NextToken", @@ -20,6 +30,16 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListRoomMemberships": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListRooms": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListUsers": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/cloudformation/2010-05-15/api-2.json b/models/apis/cloudformation/2010-05-15/api-2.json index 0e71f2be72f..f6c8345483a 100644 --- a/models/apis/cloudformation/2010-05-15/api-2.json +++ b/models/apis/cloudformation/2010-05-15/api-2.json @@ -170,6 +170,23 @@ {"shape":"OperationInProgressException"} ] }, + "DeregisterType":{ + "name":"DeregisterType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterTypeInput"}, + "output":{ + "shape":"DeregisterTypeOutput", + "resultWrapper":"DeregisterTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, "DescribeAccountLimits":{ "name":"DescribeAccountLimits", "http":{ @@ -316,6 +333,39 @@ "resultWrapper":"DescribeStacksResult" } }, + "DescribeType":{ + "name":"DescribeType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTypeInput"}, + "output":{ + "shape":"DescribeTypeOutput", + "resultWrapper":"DescribeTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, + "DescribeTypeRegistration":{ + "name":"DescribeTypeRegistration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeTypeRegistrationInput"}, + "output":{ + "shape":"DescribeTypeRegistrationOutput", + "resultWrapper":"DescribeTypeRegistrationResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, "DetectStackDrift":{ "name":"DetectStackDrift", "http":{ @@ -340,6 +390,23 @@ "resultWrapper":"DetectStackResourceDriftResult" } }, + "DetectStackSetDrift":{ + "name":"DetectStackSetDrift", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DetectStackSetDriftInput"}, + "output":{ + "shape":"DetectStackSetDriftOutput", + "resultWrapper":"DetectStackSetDriftResult" + }, + "errors":[ + {"shape":"InvalidOperationException"}, + {"shape":"OperationInProgressException"}, + {"shape":"StackSetNotFoundException"} + ] + }, "EstimateTemplateCost":{ "name":"EstimateTemplateCost", "http":{ @@ -530,6 +597,87 @@ "resultWrapper":"ListStacksResult" } }, + "ListTypeRegistrations":{ + "name":"ListTypeRegistrations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypeRegistrationsInput"}, + "output":{ + "shape":"ListTypeRegistrationsOutput", 
+ "resultWrapper":"ListTypeRegistrationsResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "ListTypeVersions":{ + "name":"ListTypeVersions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypeVersionsInput"}, + "output":{ + "shape":"ListTypeVersionsOutput", + "resultWrapper":"ListTypeVersionsResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "ListTypes":{ + "name":"ListTypes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTypesInput"}, + "output":{ + "shape":"ListTypesOutput", + "resultWrapper":"ListTypesResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, + "RecordHandlerProgress":{ + "name":"RecordHandlerProgress", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RecordHandlerProgressInput"}, + "output":{ + "shape":"RecordHandlerProgressOutput", + "resultWrapper":"RecordHandlerProgressResult" + }, + "errors":[ + {"shape":"InvalidStateTransitionException"}, + {"shape":"OperationStatusCheckFailedException"} + ], + "idempotent":true + }, + "RegisterType":{ + "name":"RegisterType", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterTypeInput"}, + "output":{ + "shape":"RegisterTypeOutput", + "resultWrapper":"RegisterTypeResult" + }, + "errors":[ + {"shape":"CFNRegistryException"} + ], + "idempotent":true + }, "SetStackPolicy":{ "name":"SetStackPolicy", "http":{ @@ -538,6 +686,23 @@ }, "input":{"shape":"SetStackPolicyInput"} }, + "SetTypeDefaultVersion":{ + "name":"SetTypeDefaultVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetTypeDefaultVersionInput"}, + "output":{ + "shape":"SetTypeDefaultVersionOutput", + "resultWrapper":"SetTypeDefaultVersionResult" + }, + "errors":[ + {"shape":"CFNRegistryException"}, + {"shape":"TypeNotFoundException"} + ], + "idempotent":true + }, "SignalResource":{ "name":"SignalResource", "http":{ @@ -707,6 +872,18 @@ "max":100, "min":1 }, + "CFNRegistryException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "error":{ + "code":"CFNRegistryException", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "CancelUpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -1039,6 +1216,27 @@ } }, "DeletionTime":{"type":"timestamp"}, + "DeprecatedStatus":{ + "type":"string", + "enum":[ + "LIVE", + "DEPRECATED" + ] + }, + "DeregisterTypeInput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"PrivateTypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "DeregisterTypeOutput":{ + "type":"structure", + "members":{ + } + }, "DescribeAccountLimitsInput":{ "type":"structure", "members":{ @@ -1233,6 +1431,51 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeTypeInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "Arn":{"shape":"TypeArn"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "DescribeTypeOutput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"TypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "DefaultVersionId":{"shape":"TypeVersionId"}, + "Description":{"shape":"Description"}, + "Schema":{"shape":"TypeSchema"}, + "ProvisioningType":{"shape":"ProvisioningType"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + 
"LoggingConfig":{"shape":"LoggingConfig"}, + "ExecutionRoleArn":{"shape":"RoleArn"}, + "Visibility":{"shape":"Visibility"}, + "SourceUrl":{"shape":"OptionalSecureUrl"}, + "DocumentationUrl":{"shape":"OptionalSecureUrl"}, + "LastUpdated":{"shape":"Timestamp"}, + "TimeCreated":{"shape":"Timestamp"} + } + }, + "DescribeTypeRegistrationInput":{ + "type":"structure", + "required":["RegistrationToken"], + "members":{ + "RegistrationToken":{"shape":"RegistrationToken"} + } + }, + "DescribeTypeRegistrationOutput":{ + "type":"structure", + "members":{ + "ProgressStatus":{"shape":"RegistrationStatus"}, + "Description":{"shape":"Description"}, + "TypeArn":{"shape":"TypeArn"}, + "TypeVersionArn":{"shape":"TypeArn"} + } + }, "Description":{ "type":"string", "max":1024, @@ -1271,6 +1514,24 @@ "StackResourceDrift":{"shape":"StackResourceDrift"} } }, + "DetectStackSetDriftInput":{ + "type":"structure", + "required":["StackSetName"], + "members":{ + "StackSetName":{"shape":"StackSetNameOrId"}, + "OperationPreferences":{"shape":"StackSetOperationPreferences"}, + "OperationId":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "DetectStackSetDriftOutput":{ + "type":"structure", + "members":{ + "OperationId":{"shape":"ClientRequestToken"} + } + }, "DifferenceType":{ "type":"string", "enum":[ @@ -1280,7 +1541,16 @@ ] }, "DisableRollback":{"type":"boolean"}, + "DriftedStackInstancesCount":{ + "type":"integer", + "min":0 + }, "EnableTerminationProtection":{"type":"boolean"}, + "ErrorMessage":{ + "type":"string", + "max":255, + "min":1 + }, "EstimateTemplateCostInput":{ "type":"structure", "members":{ @@ -1348,6 +1618,10 @@ "type":"list", "member":{"shape":"Export"} }, + "FailedStackInstancesCount":{ + "type":"integer", + "min":0 + }, "FailureToleranceCount":{ "type":"integer", "min":0 @@ -1408,10 +1682,37 @@ "ResourceIdentifierSummaries":{"shape":"ResourceIdentifierSummaries"} } }, + "HandlerErrorCode":{ + "type":"string", + "enum":[ + "NotUpdatable", + "InvalidRequest", + "AccessDenied", + "InvalidCredentials", + "AlreadyExists", + "NotFound", + "ResourceConflict", + "Throttling", + "ServiceLimitExceeded", + "NotStabilized", + "GeneralServiceException", + "ServiceInternalError", + "NetworkFailure", + "InternalFailure" + ] + }, "Imports":{ "type":"list", "member":{"shape":"StackName"} }, + "InProgressStackInstancesCount":{ + "type":"integer", + "min":0 + }, + "InSyncStackInstancesCount":{ + "type":"integer", + "min":0 + }, "InsufficientCapabilitiesException":{ "type":"structure", "members":{ @@ -1445,6 +1746,17 @@ }, "exception":true }, + "InvalidStateTransitionException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"InvalidStateTransition", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "Key":{"type":"string"}, "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ @@ -1601,6 +1913,76 @@ "NextToken":{"shape":"NextToken"} } }, + "ListTypeRegistrationsInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "TypeArn":{"shape":"TypeArn"}, + "RegistrationStatusFilter":{"shape":"RegistrationStatus"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypeRegistrationsOutput":{ + "type":"structure", + "members":{ + "RegistrationTokenList":{"shape":"RegistrationTokenList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypeVersionsInput":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + 
"TypeName":{"shape":"TypeName"}, + "Arn":{"shape":"PrivateTypeArn"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"} + } + }, + "ListTypeVersionsOutput":{ + "type":"structure", + "members":{ + "TypeVersionSummaries":{"shape":"TypeVersionSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypesInput":{ + "type":"structure", + "members":{ + "Visibility":{"shape":"Visibility"}, + "ProvisioningType":{"shape":"ProvisioningType"}, + "DeprecatedStatus":{"shape":"DeprecatedStatus"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListTypesOutput":{ + "type":"structure", + "members":{ + "TypeSummaries":{"shape":"TypeSummaries"}, + "NextToken":{"shape":"NextToken"} + } + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[\\.\\-_/#A-Za-z0-9]+" + }, + "LoggingConfig":{ + "type":"structure", + "required":[ + "LogRoleArn", + "LogGroupName" + ], + "members":{ + "LogRoleArn":{"shape":"RoleArn"}, + "LogGroupName":{"shape":"LogGroupName"} + } + }, "LogicalResourceId":{"type":"string"}, "LogicalResourceIds":{ "type":"list", @@ -1692,6 +2074,30 @@ }, "exception":true }, + "OperationStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "SUCCESS", + "FAILED" + ] + }, + "OperationStatusCheckFailedException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"ConditionalCheckFailed", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "OptionalSecureUrl":{ + "type":"string", + "max":4096 + }, "Output":{ "type":"structure", "members":{ @@ -1761,6 +2167,11 @@ "Value":{"shape":"Value"} } }, + "PrivateTypeArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:[0-9]{12}:type/.+" + }, "Properties":{"type":"string"}, "PropertyDifference":{ "type":"structure", @@ -1784,12 +2195,84 @@ "PropertyName":{"type":"string"}, "PropertyPath":{"type":"string"}, "PropertyValue":{"type":"string"}, + "ProvisioningType":{ + "type":"string", + "enum":[ + "NON_PROVISIONABLE", + "IMMUTABLE", + "FULLY_MUTABLE" + ] + }, "Reason":{"type":"string"}, + "RecordHandlerProgressInput":{ + "type":"structure", + "required":[ + "BearerToken", + "OperationStatus" + ], + "members":{ + "BearerToken":{"shape":"ClientToken"}, + "OperationStatus":{"shape":"OperationStatus"}, + "CurrentOperationStatus":{"shape":"OperationStatus"}, + "StatusMessage":{"shape":"StatusMessage"}, + "ErrorCode":{"shape":"HandlerErrorCode"}, + "ResourceModel":{"shape":"ResourceModel"}, + "ClientRequestToken":{"shape":"ClientRequestToken"} + } + }, + "RecordHandlerProgressOutput":{ + "type":"structure", + "members":{ + } + }, "Region":{"type":"string"}, "RegionList":{ "type":"list", "member":{"shape":"Region"} }, + "RegisterTypeInput":{ + "type":"structure", + "required":[ + "TypeName", + "SchemaHandlerPackage" + ], + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "SchemaHandlerPackage":{"shape":"S3Url"}, + "LoggingConfig":{"shape":"LoggingConfig"}, + "ExecutionRoleArn":{"shape":"RoleArn"}, + "ClientRequestToken":{"shape":"RequestToken"} + } + }, + "RegisterTypeOutput":{ + "type":"structure", + "members":{ + "RegistrationToken":{"shape":"RegistrationToken"} + } + }, + "RegistrationStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "IN_PROGRESS", + "FAILED" + ] + }, + "RegistrationToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" + }, 
+ "RegistrationTokenList":{ + "type":"list", + "member":{"shape":"RegistrationToken"} + }, + "RegistryType":{ + "type":"string", + "enum":["RESOURCE"] + }, "Replacement":{ "type":"string", "enum":[ @@ -1798,6 +2281,12 @@ "Conditional" ] }, + "RequestToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9][-a-zA-Z0-9]*" + }, "RequiresRecreation":{ "type":"string", "enum":[ @@ -1875,6 +2364,11 @@ "type":"list", "member":{"shape":"ResourceIdentifierPropertyKey"} }, + "ResourceModel":{ + "type":"string", + "max":16384, + "min":1 + }, "ResourceProperties":{"type":"string"}, "ResourceSignalStatus":{ "type":"string", @@ -1964,6 +2458,12 @@ "max":2048, "min":20 }, + "RoleArn":{ + "type":"string", + "max":256, + "min":1, + "pattern":"arn:.+:iam::[0-9]{12}:role/.+" + }, "RollbackConfiguration":{ "type":"structure", "members":{ @@ -1987,6 +2487,11 @@ "member":{"shape":"RollbackTrigger"}, "max":5 }, + "S3Url":{ + "type":"string", + "max":4096, + "min":1 + }, "Scope":{ "type":"list", "member":{"shape":"ResourceAttribute"} @@ -2000,6 +2505,20 @@ "StackPolicyURL":{"shape":"StackPolicyURL"} } }, + "SetTypeDefaultVersionInput":{ + "type":"structure", + "members":{ + "Arn":{"shape":"PrivateTypeArn"}, + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"} + } + }, + "SetTypeDefaultVersionOutput":{ + "type":"structure", + "members":{ + } + }, "SignalResourceInput":{ "type":"structure", "required":[ @@ -2122,7 +2641,9 @@ "StackId":{"shape":"StackId"}, "ParameterOverrides":{"shape":"Parameters"}, "Status":{"shape":"StackInstanceStatus"}, - "StatusReason":{"shape":"Reason"} + "StatusReason":{"shape":"Reason"}, + "DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackInstanceNotFoundException":{ @@ -2156,7 +2677,9 @@ "Account":{"shape":"Account"}, "StackId":{"shape":"StackId"}, "Status":{"shape":"StackInstanceStatus"}, - "StatusReason":{"shape":"Reason"} + "StatusReason":{"shape":"Reason"}, + "DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackName":{"type":"string"}, @@ -2324,10 +2847,42 @@ "Tags":{"shape":"Tags"}, "StackSetARN":{"shape":"StackSetARN"}, "AdministrationRoleARN":{"shape":"RoleARN"}, - "ExecutionRoleName":{"shape":"ExecutionRoleName"} + "ExecutionRoleName":{"shape":"ExecutionRoleName"}, + "StackSetDriftDetectionDetails":{"shape":"StackSetDriftDetectionDetails"} } }, "StackSetARN":{"type":"string"}, + "StackSetDriftDetectionDetails":{ + "type":"structure", + "members":{ + "DriftStatus":{"shape":"StackSetDriftStatus"}, + "DriftDetectionStatus":{"shape":"StackSetDriftDetectionStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"}, + "TotalStackInstancesCount":{"shape":"TotalStackInstancesCount"}, + "DriftedStackInstancesCount":{"shape":"DriftedStackInstancesCount"}, + "InSyncStackInstancesCount":{"shape":"InSyncStackInstancesCount"}, + "InProgressStackInstancesCount":{"shape":"InProgressStackInstancesCount"}, + "FailedStackInstancesCount":{"shape":"FailedStackInstancesCount"} + } + }, + "StackSetDriftDetectionStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED", + "PARTIAL_SUCCESS", + "IN_PROGRESS", + "STOPPED" + ] + }, + "StackSetDriftStatus":{ + "type":"string", + "enum":[ + "DRIFTED", + "IN_SYNC", + "NOT_CHECKED" + ] + }, "StackSetId":{"type":"string"}, "StackSetName":{"type":"string"}, "StackSetNameOrId":{ @@ -2368,7 +2923,8 @@ "AdministrationRoleARN":{"shape":"RoleARN"}, 
"ExecutionRoleName":{"shape":"ExecutionRoleName"}, "CreationTimestamp":{"shape":"Timestamp"}, - "EndTimestamp":{"shape":"Timestamp"} + "EndTimestamp":{"shape":"Timestamp"}, + "StackSetDriftDetectionDetails":{"shape":"StackSetDriftDetectionDetails"} } }, "StackSetOperationAction":{ @@ -2376,7 +2932,8 @@ "enum":[ "CREATE", "UPDATE", - "DELETE" + "DELETE", + "DETECT_DRIFT" ] }, "StackSetOperationPreferences":{ @@ -2454,7 +3011,9 @@ "StackSetName":{"shape":"StackSetName"}, "StackSetId":{"shape":"StackSetId"}, "Description":{"shape":"Description"}, - "Status":{"shape":"StackSetStatus"} + "Status":{"shape":"StackSetStatus"}, + "DriftStatus":{"shape":"StackDriftStatus"}, + "LastDriftCheckTimestamp":{"shape":"Timestamp"} } }, "StackStatus":{ @@ -2533,6 +3092,10 @@ }, "exception":true }, + "StatusMessage":{ + "type":"string", + "max":1024 + }, "StopStackSetOperationInput":{ "type":"structure", "required":[ @@ -2621,12 +3184,79 @@ }, "exception":true }, + "TotalStackInstancesCount":{ + "type":"integer", + "min":0 + }, "TransformName":{"type":"string"}, "TransformsList":{ "type":"list", "member":{"shape":"TransformName"} }, "Type":{"type":"string"}, + "TypeArn":{ + "type":"string", + "max":1024, + "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:([0-9]{12})?:type/.+" + }, + "TypeName":{ + "type":"string", + "max":196, + "min":10, + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + }, + "TypeNotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TypeNotFoundException", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "TypeSchema":{ + "type":"string", + "max":16777216, + "min":1 + }, + "TypeSummaries":{ + "type":"list", + "member":{"shape":"TypeSummary"} + }, + "TypeSummary":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "DefaultVersionId":{"shape":"TypeVersionId"}, + "TypeArn":{"shape":"TypeArn"}, + "LastUpdated":{"shape":"Timestamp"}, + "Description":{"shape":"Description"} + } + }, + "TypeVersionId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z0-9-]+" + }, + "TypeVersionSummaries":{ + "type":"list", + "member":{"shape":"TypeVersionSummary"} + }, + "TypeVersionSummary":{ + "type":"structure", + "members":{ + "Type":{"shape":"RegistryType"}, + "TypeName":{"shape":"TypeName"}, + "VersionId":{"shape":"TypeVersionId"}, + "Arn":{"shape":"TypeArn"}, + "TimeCreated":{"shape":"Timestamp"}, + "Description":{"shape":"Description"} + } + }, "UpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -2747,6 +3377,13 @@ } }, "Value":{"type":"string"}, - "Version":{"type":"string"} + "Version":{"type":"string"}, + "Visibility":{ + "type":"string", + "enum":[ + "PUBLIC", + "PRIVATE" + ] + } } } diff --git a/models/apis/cloudformation/2010-05-15/docs-2.json b/models/apis/cloudformation/2010-05-15/docs-2.json index 89363b0f2e6..77cf095dffa 100644 --- a/models/apis/cloudformation/2010-05-15/docs-2.json +++ b/models/apis/cloudformation/2010-05-15/docs-2.json @@ -12,6 +12,7 @@ "DeleteStack": "

Deletes a specified stack. Once the call completes successfully, stack deletion starts. Deleted stacks do not show up in the DescribeStacks API if the deletion has been completed successfully.

", "DeleteStackInstances": "

Deletes stack instances for the specified accounts, in the specified regions.

", "DeleteStackSet": "

Deletes a stack set. Before you can delete a stack set, all of its member stack instances must be deleted. For more information about how to do this, see DeleteStackInstances.

", + "DeregisterType": "

Removes a type or type version from active use in the CloudFormation registry. If a type or type version is deregistered, it cannot be used in CloudFormation operations.

To deregister a type, you must individually deregister all registered versions of that type. If a type has only a single registered version, deregistering that version results in the type itself being deregistered.

You cannot deregister the default version of a type, unless it is the only registered version of that type, in which case the type itself is deregistered as well.

", "DescribeAccountLimits": "

Retrieves your account's AWS CloudFormation limits, such as the maximum number of stacks that you can create in your account. For more information about account limits, see AWS CloudFormation Limits in the AWS CloudFormation User Guide.

", "DescribeChangeSet": "

Returns the inputs for the change set and a list of changes that AWS CloudFormation will make if you execute the change set. For more information, see Updating Stacks Using Change Sets in the AWS CloudFormation User Guide.

", "DescribeStackDriftDetectionStatus": "

Returns information about a stack drift detection operation. A stack drift detection operation detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. A stack is considered to have drifted if one or more of its resources have drifted. For more information on stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackDrift to initiate a stack drift detection operation. DetectStackDrift returns a StackDriftDetectionId you can use to monitor the progress of the operation using DescribeStackDriftDetectionStatus. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

", @@ -23,8 +24,11 @@ "DescribeStackSet": "

Returns the description of the specified stack set.

", "DescribeStackSetOperation": "

Returns the description of the specified stack set operation.

", "DescribeStacks": "

Returns the description for the specified stack; if no stack name was specified, then it returns the description for all the stacks created.

If the stack does not exist, an AmazonCloudFormationException is returned.

", + "DescribeType": "

Returns detailed information about a type that has been registered.

If you specify a VersionId, DescribeType returns information about that specific type version. Otherwise, it returns information about the default type version.

", + "DescribeTypeRegistration": "

Returns information about a type's registration, including its current status and type and version identifiers.

When you initiate a registration request using RegisterType, you can then use DescribeTypeRegistration to monitor the progress of that registration request.

Once the registration request has completed, use DescribeType to return detailed information about a type.
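A minimal sketch of that registration flow under this SDK version's request/Send pattern: submit RegisterType, poll DescribeTypeRegistration until it leaves IN_PROGRESS, then read the registered type with DescribeType. The method names and the RegistrationStatus/RegistryType constants are assumed from the generated model for these new operations; the type name and handler package URL are placeholders.

```go
// Sketch of RegisterType -> DescribeTypeRegistration -> DescribeType.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Submit the registration request (placeholder type name and package URL).
	reg, err := svc.RegisterTypeRequest(&cloudformation.RegisterTypeInput{
		TypeName:             aws.String("Example::Service::Resource"),
		SchemaHandlerPackage: aws.String("s3://example-bucket/handler.zip"),
	}).Send(context.TODO())
	if err != nil {
		panic(err)
	}

	// Poll until the registration request leaves IN_PROGRESS.
	for {
		status, err := svc.DescribeTypeRegistrationRequest(&cloudformation.DescribeTypeRegistrationInput{
			RegistrationToken: reg.RegistrationToken,
		}).Send(context.TODO())
		if err != nil {
			panic(err)
		}
		if status.ProgressStatus != cloudformation.RegistrationStatusInProgress { // assumed constant name
			fmt.Println("registration finished:", status.ProgressStatus)
			break
		}
		time.Sleep(5 * time.Second)
	}

	// With no VersionId specified, DescribeType returns the default version.
	desc, err := svc.DescribeTypeRequest(&cloudformation.DescribeTypeInput{
		TypeName: aws.String("Example::Service::Resource"),
		Type:     cloudformation.RegistryTypeResource, // assumed constant name
	}).Send(context.TODO())
	if err != nil {
		panic(err)
	}
	if desc.DefaultVersionId != nil {
		fmt.Println("default version:", *desc.DefaultVersionId)
	}
}
```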

", "DetectStackDrift": "

Detects whether a stack's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. For each resource in the stack that supports drift detection, AWS CloudFormation compares the actual configuration of the resource with its expected template configuration. Only resource properties explicitly defined in the stack template are checked for drift. A stack is considered to have drifted if one or more of its resources differ from their expected template configurations. For more information, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackDrift to detect drift on all supported resources for a given stack, or DetectStackResourceDrift to detect drift on individual resources.

For a list of stack resources that currently support drift detection, see Resources that Support Drift Detection.

DetectStackDrift can take up to several minutes, depending on the number of resources contained within the stack. Use DescribeStackDriftDetectionStatus to monitor the progress of a detect stack drift operation. Once the drift detection operation has completed, use DescribeStackResourceDrifts to return drift information about the stack and its resources.

When detecting drift on a stack, AWS CloudFormation does not detect drift on any nested stacks belonging to that stack. Perform DetectStackDrift directly on the nested stack itself.

", "DetectStackResourceDrift": "

Returns information about whether a resource's actual configuration differs, or has drifted, from its expected configuration, as defined in the stack template and any values specified as template parameters. This information includes actual and expected property values for resources in which AWS CloudFormation detects drift. Only resource properties explicitly defined in the stack template are checked for drift. For more information about stack and resource drift, see Detecting Unregulated Configuration Changes to Stacks and Resources.

Use DetectStackResourceDrift to detect drift on individual resources, or DetectStackDrift to detect drift on all resources in a given stack that support drift detection.

Resources that do not currently support drift detection cannot be checked. For a list of resources that support drift detection, see Resources that Support Drift Detection.

", + "DetectStackSetDrift": "

Detect drift on a stack set. When CloudFormation performs drift detection on a stack set, it performs drift detection on the stack associated with each stack instance in the stack set. For more information, see How CloudFormation Performs Drift Detection on a Stack Set.

DetectStackSetDrift returns the OperationId of the stack set drift detection operation. Use this operation ID with DescribeStackSetOperation to monitor the progress of the drift detection operation. The drift detection operation may take some time, depending on the number of stack instances included in the stack set, as well as the number of resources included in each stack.

Once the operation has completed, use the following actions to return drift information:

For more information on performing a drift detection operation on a stack set, see Detecting Unmanaged Changes in Stack Sets.

You can only run a single drift detection operation on a given stack set at one time.

To stop a drift detection stack set operation, use StopStackSetOperation.
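A minimal sketch of that flow under the v0.x request/Send pattern: start drift detection, then poll DescribeStackSetOperation with the returned operation ID until the operation is no longer RUNNING. The DetectStackSetDriftRequest method and status constant names are assumed from the generated model; the stack set name is a placeholder.

```go
// Sketch of DetectStackSetDrift followed by polling DescribeStackSetOperation.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Kick off drift detection on the whole stack set (placeholder name).
	start, err := svc.DetectStackSetDriftRequest(&cloudformation.DetectStackSetDriftInput{
		StackSetName: aws.String("example-stack-set"),
	}).Send(context.TODO())
	if err != nil {
		panic(err)
	}

	// Poll the operation until it is no longer running.
	for {
		op, err := svc.DescribeStackSetOperationRequest(&cloudformation.DescribeStackSetOperationInput{
			StackSetName: aws.String("example-stack-set"),
			OperationId:  start.OperationId,
		}).Send(context.TODO())
		if err != nil {
			panic(err)
		}
		if op.StackSetOperation.Status != cloudformation.StackSetOperationStatusRunning { // assumed constant name
			fmt.Println("drift detection finished:", op.StackSetOperation.Status)
			break
		}
		time.Sleep(10 * time.Second)
	}
}
```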

", "EstimateTemplateCost": "

Returns the estimated monthly cost of a template. The return value is an AWS Simple Monthly Calculator URL with a query string that describes the resources required to run the template.

", "ExecuteChangeSet": "

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, AWS CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, AWS CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, AWS CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

", "GetStackPolicy": "

Returns the stack policy for a specified stack. If a stack doesn't have a policy, a null value is returned.

", @@ -39,7 +43,13 @@ "ListStackSetOperations": "

Returns summary information about operations performed on a stack set.

", "ListStackSets": "

Returns summary information about stack sets that are associated with the user.

", "ListStacks": "

Returns the summary information for stacks whose status matches the specified StackStatusFilter. Summary information for stacks that have been deleted is kept for 90 days after the stack is deleted. If no StackStatusFilter is specified, summary information for all stacks is returned (including existing stacks and stacks that have been deleted).

", + "ListTypeRegistrations": "

Returns a list of registration tokens for the specified type.

", + "ListTypeVersions": "

Returns summary information about the versions of a type.

", + "ListTypes": "

Returns summary information about types that have been registered with CloudFormation.

", + "RecordHandlerProgress": "

Reports progress of a resource handler to CloudFormation.

Reserved for use by the CloudFormation CLI. Do not use this API in your code.

", + "RegisterType": "

Registers a type with the CloudFormation service. Registering a type makes it available for use in CloudFormation templates in your AWS account, and includes:

For more information on how to develop types and ready them for registration, see Creating Resource Providers in the CloudFormation CLI User Guide.

Once you have initiated a registration request using RegisterType, you can use DescribeTypeRegistration to monitor the progress of the registration request.

", "SetStackPolicy": "

Sets a stack policy for a specified stack.

", + "SetTypeDefaultVersion": "

Specify the default version of a type. The default version of a type will be used in CloudFormation operations.

", "SignalResource": "

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource API in conjunction with a creation policy or update policy. AWS CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource API is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

", "StopStackSetOperation": "

Stops an in-progress operation on a stack set and its associated stack instances.

", "UpdateStack": "

Updates a stack as specified in the template. After the call completes successfully, the stack update starts. You can check the status of the stack via the DescribeStacks action.

To get a copy of the template for an existing stack, you can use the GetTemplate action.

For more information about creating an update template, updating a stack, and monitoring the progress of the update, see Updating a Stack.

", @@ -134,6 +144,11 @@ "DescribeStackResourceDriftsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" } }, + "CFNRegistryException": { + "base": "

An error occurred during a CloudFormation registry operation.

", + "refs": { + } + }, "CancelUpdateStackInput": { "base": "

The input for the CancelUpdateStack action.

", "refs": { @@ -279,8 +294,11 @@ "DeleteStackInstancesInput$OperationId": "

The unique identifier for this stack set operation.

If you don't specify an operation ID, the SDK generates one automatically.

The operation ID also functions as an idempotency token, to ensure that AWS CloudFormation performs the stack set operation only once, even if you retry the request multiple times. You can retry stack set operation requests to ensure that AWS CloudFormation successfully received them.

Repeating this stack set operation with a new operation ID retries all stack instances whose status is OUTDATED.

", "DeleteStackInstancesOutput$OperationId": "

The unique identifier for this stack set operation.

", "DescribeStackSetOperationInput$OperationId": "

The unique ID of the stack set operation.

", + "DetectStackSetDriftInput$OperationId": "

The ID of the stack set operation.

", + "DetectStackSetDriftOutput$OperationId": "

The ID of the drift detection stack set operation.

You can use this operation ID with DescribeStackSetOperation to monitor the progress of the drift detection operation.

", "ExecuteChangeSetInput$ClientRequestToken": "

A unique identifier for this ExecuteChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to execute a change set to update a stack with the same name. You might retry ExecuteChangeSet requests to ensure that AWS CloudFormation successfully received them.

", "ListStackSetOperationResultsInput$OperationId": "

The ID of the stack set operation.

", + "RecordHandlerProgressInput$ClientRequestToken": "

Reserved for use by the CloudFormation CLI.

", "StackEvent$ClientRequestToken": "

The token passed to the operation that generated this event.

All events triggered by a given stack operation are assigned the same client request token, which you can use to track operations. For example, if you execute a CreateStack operation with the token token1, then all the StackEvents generated by that operation will have ClientRequestToken set as token1.

In the console, stack operations display the client request token on the Events tab. Stack operations that are initiated from the console use the token format Console-StackOperation-ID, which helps you easily identify the stack operation. For example, if you create a stack using the console, each stack event would be assigned the same token in the following format: Console-CreateStack-7f59c3cf-00d2-40c7-b2ff-e75db0987002.

", "StackSetOperation$OperationId": "

The unique ID of a stack set operation.

", "StackSetOperationSummary$OperationId": "

The unique ID of the stack set operation.

", @@ -295,7 +313,8 @@ "ClientToken": { "base": null, "refs": { - "CreateChangeSetInput$ClientToken": "

A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them.

" + "CreateChangeSetInput$ClientToken": "

A unique identifier for this CreateChangeSet request. Specify this token if you plan to retry requests so that AWS CloudFormation knows that you're not attempting to create another change set with the same name. You might retry CreateChangeSet requests to ensure that AWS CloudFormation successfully received them.

", + "RecordHandlerProgressInput$BearerToken": "

Reserved for use by the CloudFormation CLI.

" } }, "ContinueUpdateRollbackInput": { @@ -404,6 +423,24 @@ "StackSummary$DeletionTime": "

The time the stack was deleted.

" } }, + "DeprecatedStatus": { + "base": null, + "refs": { + "DescribeTypeOutput$DeprecatedStatus": "

The deprecation status of the type.

Valid values include:

", + "ListTypeVersionsInput$DeprecatedStatus": "

The deprecation status of the type versions that you want to get summary information about.

Valid values include:

", + "ListTypesInput$DeprecatedStatus": "

The deprecation status of the types that you want to get summary information about.

Valid values include:

" + } + }, + "DeregisterTypeInput": { + "base": null, + "refs": { + } + }, + "DeregisterTypeOutput": { + "base": null, + "refs": { + } + }, "DescribeAccountLimitsInput": { "base": "

The input for the DescribeAccountLimits action.

", "refs": { @@ -514,6 +551,26 @@ "refs": { } }, + "DescribeTypeInput": { + "base": null, + "refs": { + } + }, + "DescribeTypeOutput": { + "base": null, + "refs": { + } + }, + "DescribeTypeRegistrationInput": { + "base": null, + "refs": { + } + }, + "DescribeTypeRegistrationOutput": { + "base": null, + "refs": { + } + }, "Description": { "base": null, "refs": { @@ -521,6 +578,8 @@ "CreateChangeSetInput$Description": "

A description to help you identify this change set.

", "CreateStackSetInput$Description": "

A description of the stack set. You can use the description to identify the stack set's purpose or other important information.

", "DescribeChangeSetOutput$Description": "

Information about the change set.

", + "DescribeTypeOutput$Description": "

The description of the registered type.

", + "DescribeTypeRegistrationOutput$Description": "

The description of the type registration request.

", "GetTemplateSummaryOutput$Description": "

The value that is defined in the Description property of the template.

", "Output$Description": "

User-defined description associated with the output.

", "ParameterDeclaration$Description": "

The description that is associated with the parameter.

", @@ -530,6 +589,8 @@ "StackSet$Description": "

A description of the stack set that you specify when the stack set is created or updated.

", "StackSetSummary$Description": "

A description of the stack set that you specify when the stack set is created or updated.

", "TemplateParameter$Description": "

User-defined description associated with the parameter.

", + "TypeSummary$Description": "

The description of the type.

", + "TypeVersionSummary$Description": "

The description of the type version.

", "UpdateStackSetInput$Description": "

A brief description of updates that you are making.

", "ValidateTemplateOutput$Description": "

The description found within the template.

" } @@ -554,6 +615,16 @@ "refs": { } }, + "DetectStackSetDriftInput": { + "base": null, + "refs": { + } + }, + "DetectStackSetDriftOutput": { + "base": null, + "refs": { + } + }, "DifferenceType": { "base": null, "refs": { @@ -567,6 +638,12 @@ "Stack$DisableRollback": "

Boolean to enable or disable rollback on stack creation failures:

" } }, + "DriftedStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$DriftedStackInstancesCount": "

The number of stack instances that have drifted from the expected template and parameter configuration of the stack set. A stack instance is considered to have drifted if one or more of the resources in the associated stack do not match their expected configuration.

" + } + }, "EnableTerminationProtection": { "base": null, "refs": { @@ -575,6 +652,12 @@ "UpdateTerminationProtectionInput$EnableTerminationProtection": "

Whether to enable termination protection on the specified stack.

" } }, + "ErrorMessage": { + "base": null, + "refs": { + "CFNRegistryException$Message": null + } + }, "EstimateTemplateCostInput": { "base": "

The input for an EstimateTemplateCost action.

", "refs": { @@ -649,6 +732,12 @@ "ListExportsOutput$Exports": "

The output for the ListExports action.

" } }, + "FailedStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$FailedStackInstancesCount": "

The number of stack instances for which the drift detection operation failed.

" + } + }, "FailureToleranceCount": { "base": null, "refs": { @@ -691,12 +780,30 @@ "refs": { } }, + "HandlerErrorCode": { + "base": null, + "refs": { + "RecordHandlerProgressInput$ErrorCode": "

Reserved for use by the CloudFormation CLI.

" + } + }, "Imports": { "base": null, "refs": { "ListImportsOutput$Imports": "

A list of stack names that are importing the specified exported output value.

" } }, + "InProgressStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$InProgressStackInstancesCount": "

The number of stack instances that are currently being checked for drift.

" + } + }, + "InSyncStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$InSyncStackInstancesCount": "

The number of stack instances that match the expected template and parameter configuration of the stack set.

" + } + }, "InsufficientCapabilitiesException": { "base": "

The template contains resources with capabilities that weren't specified in the Capabilities parameter.

", "refs": { @@ -712,6 +819,11 @@ "refs": { } }, + "InvalidStateTransitionException": { + "base": "

Error reserved for use by the CloudFormation CLI. CloudFormation does not return this error to users.

", + "refs": { + } + }, "Key": { "base": null, "refs": { @@ -832,6 +944,49 @@ "refs": { } }, + "ListTypeRegistrationsInput": { + "base": null, + "refs": { + } + }, + "ListTypeRegistrationsOutput": { + "base": null, + "refs": { + } + }, + "ListTypeVersionsInput": { + "base": null, + "refs": { + } + }, + "ListTypeVersionsOutput": { + "base": null, + "refs": { + } + }, + "ListTypesInput": { + "base": null, + "refs": { + } + }, + "ListTypesOutput": { + "base": null, + "refs": { + } + }, + "LogGroupName": { + "base": null, + "refs": { + "LoggingConfig$LogGroupName": "

The Amazon CloudWatch log group to which CloudFormation sends error logging information when invoking the type's handlers.

" + } + }, + "LoggingConfig": { + "base": "

Contains logging configuration information for a type.

", + "refs": { + "DescribeTypeOutput$LoggingConfig": "

Contains logging configuration information for a type.

", + "RegisterTypeInput$LoggingConfig": "

Specifies logging configuration information for a type.

" + } + }, "LogicalResourceId": { "base": null, "refs": { @@ -875,7 +1030,10 @@ "ListStackInstancesInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", "ListStackSetOperationResultsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", "ListStackSetOperationsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", - "ListStackSetsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" + "ListStackSetsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListTypeRegistrationsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListTypeVersionsInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

", + "ListTypesInput$MaxResults": "

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

" } }, "Metadata": { @@ -926,7 +1084,13 @@ "ListStackSetsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call ListStackSets again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", "ListStackSetsOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListStackInstances again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

", "ListStacksInput$NextToken": "

A string that identifies the next page of stacks that you want to retrieve.

", - "ListStacksOutput$NextToken": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

" + "ListStacksOutput$NextToken": "

If the output exceeds 1 MB in size, a string that identifies the next page of stacks. If no additional page exists, this value is null.

", + "ListTypeRegistrationsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListTypeRegistrationsOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

", + "ListTypeVersionsInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListTypeVersionsOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

", + "ListTypesInput$NextToken": "

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

", + "ListTypesOutput$NextToken": "

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.
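Taken together, the MaxResults and NextToken members above describe the usual CloudFormation pagination contract for the new ListTypes, ListTypeRegistrations, and ListTypeVersions operations (the paginator definitions appear later in this diff). A minimal sketch of driving that loop by hand against SDK v0.17.0, assuming the generated request/send pattern and the pointer helpers of the v0.x aws package; error handling is reduced to panics:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	// Load region and credentials from the environment/shared config.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Page through registered types: feed each response's NextToken back
	// into the next request until the service stops returning one.
	input := &cloudformation.ListTypesInput{MaxResults: aws.Int64(50)}
	for {
		resp, err := svc.ListTypesRequest(input).Send(context.Background())
		if err != nil {
			panic(err)
		}
		for _, summary := range resp.TypeSummaries {
			fmt.Println(aws.StringValue(summary.TypeName))
		}
		if resp.NextToken == nil {
			break
		}
		input.NextToken = resp.NextToken
	}
}
```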

" } }, "NoEcho": { @@ -973,6 +1137,25 @@ "refs": { } }, + "OperationStatus": { + "base": null, + "refs": { + "RecordHandlerProgressInput$OperationStatus": "

Reserved for use by the CloudFormation CLI.

", + "RecordHandlerProgressInput$CurrentOperationStatus": "

Reserved for use by the CloudFormation CLI.

" + } + }, + "OperationStatusCheckFailedException": { + "base": "

Error reserved for use by the CloudFormation CLI. CloudFormation does not return this error to users.

", + "refs": { + } + }, + "OptionalSecureUrl": { + "base": null, + "refs": { + "DescribeTypeOutput$SourceUrl": "

The URL of the source code for the type.

", + "DescribeTypeOutput$DocumentationUrl": "

The URL of a page providing detailed documentation for this type.

" + } + }, "Output": { "base": "

The Output data type.

", "refs": { @@ -1085,6 +1268,14 @@ "PhysicalResourceIdContext$member": null } }, + "PrivateTypeArn": { + "base": null, + "refs": { + "DeregisterTypeInput$Arn": "

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify TypeName or Arn.

", + "ListTypeVersionsInput$Arn": "

The Amazon Resource Name (ARN) of the type for which you want version summary information.

Conditional: You must specify TypeName or Arn.

", + "SetTypeDefaultVersionInput$Arn": "

The Amazon Resource Name (ARN) of the type for which you want version summary information.

Conditional: You must specify TypeName or Arn.

" + } + }, "Properties": { "base": null, "refs": { @@ -1123,6 +1314,13 @@ "PropertyDifference$ActualValue": "

The actual property value of the resource property.

" } }, + "ProvisioningType": { + "base": null, + "refs": { + "DescribeTypeOutput$ProvisioningType": "

The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

", + "ListTypesInput$ProvisioningType": "

The provisioning behavior of the type. AWS CloudFormation determines the provisioning type during registration, based on the types of handlers in the schema handler package submitted.

Valid values include:

" + } + }, "Reason": { "base": null, "refs": { @@ -1131,6 +1329,16 @@ "StackSetOperationResultSummary$StatusReason": "

The reason for the assigned result status.

" } }, + "RecordHandlerProgressInput": { + "base": null, + "refs": { + } + }, + "RecordHandlerProgressOutput": { + "base": null, + "refs": { + } + }, "Region": { "base": null, "refs": { @@ -1152,12 +1360,63 @@ "UpdateStackSetInput$Regions": "

The regions in which to update associated stack instances. If you specify regions, you must also specify accounts in which to update stack set instances.

To update all the stack instances associated with this stack set, do not specify the Accounts or Regions properties.

If the stack set update includes changes to the template (that is, if the TemplateBody or TemplateURL properties are specified), or the Parameters property, AWS CloudFormation marks all stack instances with a status of OUTDATED prior to updating the stack instances in the specified accounts and regions. If the stack set update does not include changes to the template or parameters, AWS CloudFormation updates the stack instances in the specified accounts and regions, while leaving all other stack instances with their existing stack instance status.

" } }, + "RegisterTypeInput": { + "base": null, + "refs": { + } + }, + "RegisterTypeOutput": { + "base": null, + "refs": { + } + }, + "RegistrationStatus": { + "base": null, + "refs": { + "DescribeTypeRegistrationOutput$ProgressStatus": "

The current status of the type registration request.

", + "ListTypeRegistrationsInput$RegistrationStatusFilter": "

The current status of the type registration request.

" + } + }, + "RegistrationToken": { + "base": null, + "refs": { + "DescribeTypeRegistrationInput$RegistrationToken": "

The identifier for this registration request.

This registration token is generated by CloudFormation when you initiate a registration request using RegisterType.

", + "RegisterTypeOutput$RegistrationToken": "

The identifier for this registration request.

Use this registration token when calling DescribeTypeRegistration, which returns information about the status and IDs of the type registration.
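The two RegistrationToken members above describe a submit-then-poll flow: RegisterType hands back the token, and DescribeTypeRegistration reports the request's ProgressStatus until it reaches a terminal state (the TypeRegistrationComplete waiter added in waiters-2.json automates the same check). A hedged sketch under the same v0.x request/send assumptions; the type name, schema handler package URL, execution role ARN, and client request token are placeholders, and enum values are written as raw strings rather than the generated constants:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)
	ctx := context.Background()

	// Submit the registration. The type name, schema handler package URL,
	// and execution role below are placeholders, not real resources.
	reg, err := svc.RegisterTypeRequest(&cloudformation.RegisterTypeInput{
		Type:                 cloudformation.RegistryType("RESOURCE"),
		TypeName:             aws.String("MyOrg::MyService::MyType"),
		SchemaHandlerPackage: aws.String("s3://example-bucket/my-type-handler.zip"),
		ExecutionRoleArn:     aws.String("arn:aws:iam::123456789012:role/MyTypeExecutionRole"),
		ClientRequestToken:   aws.String("register-my-type-1"),
	}).Send(ctx)
	if err != nil {
		panic(err)
	}

	// Poll DescribeTypeRegistration with the returned token until the
	// registration leaves its in-progress state (COMPLETE or FAILED).
	for {
		st, err := svc.DescribeTypeRegistrationRequest(&cloudformation.DescribeTypeRegistrationInput{
			RegistrationToken: reg.RegistrationToken,
		}).Send(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println("progress:", st.ProgressStatus)
		if st.ProgressStatus != cloudformation.RegistrationStatus("IN_PROGRESS") {
			fmt.Println("type version ARN:", aws.StringValue(st.TypeVersionArn))
			break
		}
		time.Sleep(30 * time.Second)
	}
}
```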

", + "RegistrationTokenList$member": null + } + }, + "RegistrationTokenList": { + "base": null, + "refs": { + "ListTypeRegistrationsOutput$RegistrationTokenList": "

A list of type registration tokens.

Use DescribeTypeRegistration to return detailed information about a type registration request.

" + } + }, + "RegistryType": { + "base": null, + "refs": { + "DeregisterTypeInput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

", + "DescribeTypeInput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

", + "DescribeTypeOutput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

", + "ListTypeRegistrationsInput$Type": "

The kind of type.

Currently the only valid value is RESOURCE.

", + "ListTypeVersionsInput$Type": "

The kind of the type.

Currently the only valid value is RESOURCE.

", + "RegisterTypeInput$Type": "

The kind of type.

Currently, the only valid value is RESOURCE.

", + "SetTypeDefaultVersionInput$Type": "

The kind of type.

", + "TypeSummary$Type": "

The kind of type.

", + "TypeVersionSummary$Type": "

The kind of type.

" + } + }, "Replacement": { "base": null, "refs": { "ResourceChange$Replacement": "

For the Modify action, indicates whether AWS CloudFormation will replace the resource by creating a new one and deleting the old one. This value depends on the value of the RequiresRecreation property in the ResourceTargetDefinition structure. For example, if the RequiresRecreation field is Always and the Evaluation field is Static, Replacement is True. If the RequiresRecreation field is Always and the Evaluation field is Dynamic, Replacement is Conditionally.

If you have multiple changes with different RequiresRecreation values, the Replacement value depends on the change with the most impact. A RequiresRecreation value of Always has the most impact, followed by Conditionally, and then Never.

" } }, + "RequestToken": { + "base": null, + "refs": { + "RegisterTypeInput$ClientRequestToken": "

A unique identifier that acts as an idempotency key for this registration request. Specifying a client request token prevents CloudFormation from generating more than one version of a type from the same registration request, even if the request is submitted multiple times.

" + } + }, "RequiresRecreation": { "base": null, "refs": { @@ -1226,6 +1485,12 @@ "ResourceIdentifierSummary$ResourceIdentifiers": "

The resource properties you can provide during the import to identify your target resources. For example, BucketName is a possible identifier property for AWS::S3::Bucket resources.

" } }, + "ResourceModel": { + "base": null, + "refs": { + "RecordHandlerProgressInput$ResourceModel": "

Reserved for use by the CloudFormation CLI.

" + } + }, "ResourceProperties": { "base": null, "refs": { @@ -1348,6 +1613,14 @@ "UpdateStackSetInput$AdministrationRoleARN": "

The Amazon Resource Number (ARN) of the IAM role to use to update this stack set.

Specify an IAM role only if you are using customized administrator roles to control which users or groups can manage specific stack sets within the same administrator account. For more information, see Granting Permissions for Stack Set Operations in the AWS CloudFormation User Guide.

If you specified a customized administrator role when you created the stack set, you must specify a customized administrator role, even if it is the same customized administrator role used with this stack set previously.

" } }, + "RoleArn": { + "base": null, + "refs": { + "DescribeTypeOutput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role used to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.

", + "LoggingConfig$LogRoleArn": "

The ARN of the role that CloudFormation should assume when sending log entries to CloudWatch logs.

", + "RegisterTypeInput$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM execution role to use to register the type. If your resource type calls AWS APIs in any of its handlers, you must create an IAM execution role that includes the necessary permissions to call those AWS APIs, and provision that execution role in your account. CloudFormation then assumes that execution role to provide your resource type with the appropriate credentials.

" + } + }, "RollbackConfiguration": { "base": "

Structure containing the rollback triggers for AWS CloudFormation to monitor during stack creation and updating operations, and for the specified monitoring period afterwards.

Rollback triggers enable you to have AWS CloudFormation monitor the state of your application during stack creation and updating, and to roll back that operation if the application breaches the threshold of any of the alarms you've specified. For more information, see Monitor and Roll Back Stack Operations.

", "refs": { @@ -1370,6 +1643,12 @@ "RollbackConfiguration$RollbackTriggers": "

The triggers to monitor during stack creation or update actions.

By default, AWS CloudFormation saves the rollback triggers specified for a stack and applies them to any subsequent update operations for the stack, unless you specify otherwise. If you do specify rollback triggers for this parameter, those triggers replace any list of triggers previously specified for the stack. This means:

If a specified trigger is missing, the entire stack operation fails and is rolled back.

" } }, + "S3Url": { + "base": null, + "refs": { + "RegisterTypeInput$SchemaHandlerPackage": "

A URL to the S3 bucket containing the schema handler package, which holds the schema, event handlers, and associated files for the type you want to register.

For information on generating a schema handler package for the type you want to register, see the submit command in the CloudFormation CLI User Guide.

" + } + }, "Scope": { "base": null, "refs": { @@ -1381,6 +1660,16 @@ "refs": { } }, + "SetTypeDefaultVersionInput": { + "base": null, + "refs": { + } + }, + "SetTypeDefaultVersionOutput": { + "base": null, + "refs": { + } + }, "SignalResourceInput": { "base": "

The input for the SignalResource action.

", "refs": { @@ -1429,7 +1718,10 @@ "refs": { "DescribeStackDriftDetectionStatusOutput$StackDriftStatus": "

Status of the stack's actual configuration compared to its expected configuration.

", "StackDriftInformation$StackDriftStatus": "

Status of the stack's actual configuration compared to its expected template configuration.

", - "StackDriftInformationSummary$StackDriftStatus": "

Status of the stack's actual configuration compared to its expected template configuration.

" + "StackDriftInformationSummary$StackDriftStatus": "

Status of the stack's actual configuration compared to its expected template configuration.

", + "StackInstance$DriftStatus": "

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

", + "StackInstanceSummary$DriftStatus": "

Status of the stack instance's actual configuration compared to the expected template and parameter configuration of the stack set to which it belongs.

", + "StackSetSummary$DriftStatus": "

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

" } }, "StackEvent": { @@ -1653,6 +1945,25 @@ "StackSet$StackSetARN": "

The Amazon Resource Number (ARN) of the stack set.

" } }, + "StackSetDriftDetectionDetails": { + "base": "

Detailed information about the drift status of the stack set.

For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations in progress is not included.

For stack set operations, includes information about drift operations currently being performed on the stack set.

For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.

", + "refs": { + "StackSet$StackSetDriftDetectionDetails": "

Detailed information about the drift status of the stack set.

For stack sets, contains information about the last completed drift operation performed on the stack set. Information about drift operations currently in progress is not included.

", + "StackSetOperation$StackSetDriftDetectionDetails": "

Detailed information about the drift status of the stack set. This includes information about drift operations currently being performed on the stack set.

This information is present only for stack set operations whose Action type is DETECT_DRIFT.

For more information, see Detecting Unmanaged Changes in Stack Sets in the AWS CloudFormation User Guide.

" + } + }, + "StackSetDriftDetectionStatus": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$DriftDetectionStatus": "

The status of the stack set drift detection operation.

" + } + }, + "StackSetDriftStatus": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$DriftStatus": "

Status of the stack set's actual configuration compared to its expected template and parameter configuration. A stack set is considered to have drifted if one or more of its stack instances have drifted from their expected template and parameter configuration.

" + } + }, "StackSetId": { "base": null, "refs": { @@ -1686,6 +1997,7 @@ "StackSetNameOrId": { "base": null, "refs": { + "DetectStackSetDriftInput$StackSetName": "

The name of the stack set on which to perform the drift detection operation.
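DetectStackSetDrift is the entry point for the stack set drift fields added throughout this file (DriftStatus, LastDriftCheckTimestamp, StackSetDriftDetectionDetails). A small sketch, again assuming the v0.x request/send pattern; the stack set name is a placeholder and OperationPreferences is left at its default:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Kick off drift detection on a stack set. "my-stack-set" is a placeholder;
	// OperationPreferences is optional and omitted here.
	resp, err := svc.DetectStackSetDriftRequest(&cloudformation.DetectStackSetDriftInput{
		StackSetName: aws.String("my-stack-set"),
	}).Send(context.Background())
	if err != nil {
		panic(err)
	}
	// The response identifies the drift-detection operation; once it finishes,
	// DriftStatus and LastDriftCheckTimestamp appear on the stack set, its
	// summaries, and its stack instances.
	fmt.Println(resp)
}
```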

", "GetTemplateSummaryInput$StackSetName": "

The name or unique ID of the stack set from which the stack was created.

Conditional: You must specify only one of the following parameters: StackName, StackSetName, TemplateBody, or TemplateURL.

", "UpdateStackInstancesInput$StackSetName": "

The name or unique ID of the stack set associated with the stack instances.

" } @@ -1718,6 +2030,7 @@ "refs": { "CreateStackInstancesInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

", "DeleteStackInstancesInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

", + "DetectStackSetDriftInput$OperationPreferences": null, "StackSetOperation$OperationPreferences": "

The preferences for how AWS CloudFormation performs this stack set operation.

", "UpdateStackInstancesInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

", "UpdateStackSetInput$OperationPreferences": "

Preferences for how AWS CloudFormation performs this stack set operation.

" @@ -1830,6 +2143,12 @@ "refs": { } }, + "StatusMessage": { + "base": null, + "refs": { + "RecordHandlerProgressInput$StatusMessage": "

Reserved for use by the CloudFormation CLI.

" + } + }, "StopStackSetOperationInput": { "base": null, "refs": { @@ -1935,19 +2254,27 @@ "base": null, "refs": { "DescribeStackDriftDetectionStatusOutput$Timestamp": "

Time at which the stack drift detection operation was initiated.

", + "DescribeTypeOutput$LastUpdated": "

When the specified type version was registered.

", + "DescribeTypeOutput$TimeCreated": "

When the specified type version was registered.

", "StackDriftInformation$LastCheckTimestamp": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

", "StackDriftInformationSummary$LastCheckTimestamp": "

Most recent time when a drift detection operation was initiated on the stack, or any of its individual resources that support drift detection.

", "StackEvent$Timestamp": "

Time the status was updated.

", + "StackInstance$LastDriftCheckTimestamp": "

Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL for any stack instance on which drift detection has not yet been performed.

", + "StackInstanceSummary$LastDriftCheckTimestamp": "

Most recent time when CloudFormation performed a drift detection operation on the stack instance. This value will be NULL for any stack instance on which drift detection has not yet been performed.

", "StackResource$Timestamp": "

Time the status was updated.

", "StackResourceDetail$LastUpdatedTimestamp": "

Time the status was updated.

", "StackResourceDrift$Timestamp": "

Time at which AWS CloudFormation performed drift detection on the stack resource.

", "StackResourceDriftInformation$LastCheckTimestamp": "

When AWS CloudFormation last checked if the resource had drifted from its expected configuration.

", "StackResourceDriftInformationSummary$LastCheckTimestamp": "

When AWS CloudFormation last checked if the resource had drifted from its expected configuration.

", "StackResourceSummary$LastUpdatedTimestamp": "

Time the status was updated.

", + "StackSetDriftDetectionDetails$LastDriftCheckTimestamp": "

Most recent time when CloudFormation performed a drift detection operation on the stack set. This value will be NULL for any stack set on which drift detection has not yet been performed.

", "StackSetOperation$CreationTimestamp": "

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

", "StackSetOperation$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

", "StackSetOperationSummary$CreationTimestamp": "

The time at which the operation was initiated. Note that the creation times for the stack set operation might differ from the creation time of the individual stacks themselves. This is because AWS CloudFormation needs to perform preparatory work for the operation, such as dispatching the work to the requested regions, before actually creating the first stacks.

", - "StackSetOperationSummary$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

" + "StackSetOperationSummary$EndTimestamp": "

The time at which the stack set operation ended, across all accounts and regions specified. Note that this doesn't necessarily mean that the stack set operation was successful, or even attempted, in each account or region.

", + "StackSetSummary$LastDriftCheckTimestamp": "

Most recent time when CloudFormation performed a drift detection operation on the stack set. This value will be NULL for any stack set on which drift detection has not yet been performed.

", + "TypeSummary$LastUpdated": "

When the current default version of the type was registered.

", + "TypeVersionSummary$TimeCreated": "

When the version was registered.

" } }, "TokenAlreadyExistsException": { @@ -1955,6 +2282,12 @@ "refs": { } }, + "TotalStackInstancesCount": { + "base": null, + "refs": { + "StackSetDriftDetectionDetails$TotalStackInstancesCount": "

The total number of stack instances belonging to this stack set.

The total number of stack instances is equal to the total of:

" + } + }, "TransformName": { "base": null, "refs": { @@ -1974,6 +2307,78 @@ "RollbackTrigger$Type": "

The resource type of the rollback trigger. Currently, AWS::CloudWatch::Alarm is the only supported resource type.

" } }, + "TypeArn": { + "base": null, + "refs": { + "DescribeTypeInput$Arn": "

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify TypeName or Arn.

", + "DescribeTypeOutput$Arn": "

The Amazon Resource Name (ARN) of the type.

", + "DescribeTypeRegistrationOutput$TypeArn": "

The Amazon Resource Name (ARN) of the type being registered.

For registration requests with a ProgressStatus other than COMPLETE, this will be null.

", + "DescribeTypeRegistrationOutput$TypeVersionArn": "

The Amazon Resource Name (ARN) of this specific version of the type being registered.

For registration requests with a ProgressStatus other than COMPLETE, this will be null.

", + "ListTypeRegistrationsInput$TypeArn": "

The Amazon Resource Name (ARN) of the type.

Conditional: You must specify TypeName or Arn.

", + "TypeSummary$TypeArn": "

The Amazon Resource Name (ARN) of the type.

", + "TypeVersionSummary$Arn": "

The Amazon Resource Name (ARN) of the type version.

" + } + }, + "TypeName": { + "base": null, + "refs": { + "DeregisterTypeInput$TypeName": "

The name of the type.

Conditional: You must specify TypeName or Arn.

", + "DescribeTypeInput$TypeName": "

The name of the type.

Conditional: You must specify TypeName or Arn.

", + "DescribeTypeOutput$TypeName": "

The name of the registered type.

", + "ListTypeRegistrationsInput$TypeName": "

The name of the type.

Conditional: You must specify TypeName or Arn.

", + "ListTypeVersionsInput$TypeName": "

The name of the type for which you want version summary information.

Conditional: You must specify TypeName or Arn.

", + "RegisterTypeInput$TypeName": "

The name of the type being registered.

We recommend that type names adhere to the following pattern: company_or_organization::service::type.

The following organization namespaces are reserved and cannot be used in your resource type names:

", + "SetTypeDefaultVersionInput$TypeName": "

The name of the type.

Conditional: You must specify TypeName or Arn.

", + "TypeSummary$TypeName": "

The name of the type.

", + "TypeVersionSummary$TypeName": "

The name of the type.

" + } + }, + "TypeNotFoundException": { + "base": "

The specified type does not exist in the CloudFormation registry.

", + "refs": { + } + }, + "TypeSchema": { + "base": null, + "refs": { + "DescribeTypeOutput$Schema": "

The schema that defines the type.

For more information on type schemas, see Resource Provider Schema in the CloudFormation CLI User Guide.
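DescribeType is where the Schema, DefaultVersionId, Visibility, and ProvisioningType members described in this file surface. A short sketch under the same v0.x request/send assumptions; the type name is a placeholder, and omitting VersionId returns the default version:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Describe the default version of a registered type. The type name is a
	// placeholder; passing VersionId instead would select a specific version.
	resp, err := svc.DescribeTypeRequest(&cloudformation.DescribeTypeInput{
		Type:     cloudformation.RegistryType("RESOURCE"),
		TypeName: aws.String("MyOrg::MyService::MyType"),
	}).Send(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("default version:", aws.StringValue(resp.DefaultVersionId))
	fmt.Println("visibility:", resp.Visibility)
	fmt.Println("schema:", aws.StringValue(resp.Schema))
}
```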

" + } + }, + "TypeSummaries": { + "base": null, + "refs": { + "ListTypesOutput$TypeSummaries": "

A list of TypeSummary structures that contain information about the specified types.

" + } + }, + "TypeSummary": { + "base": "

Contains summary information about the specified CloudFormation type.

", + "refs": { + "TypeSummaries$member": null + } + }, + "TypeVersionId": { + "base": null, + "refs": { + "DeregisterTypeInput$VersionId": "

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

", + "DescribeTypeInput$VersionId": "

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

If you specify a VersionId, DescribeType returns information about that specific type version. Otherwise, it returns information about the default type version.

", + "DescribeTypeOutput$DefaultVersionId": "

The ID of the default version of the type. The default version is used when the type version is not specified.

To set the default version of a type, use SetTypeDefaultVersion.

", + "SetTypeDefaultVersionInput$VersionId": "

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

", + "TypeSummary$DefaultVersionId": "

The ID of the default version of the type. The default version is used when the type version is not specified.

To set the default version of a type, use SetTypeDefaultVersion.
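A matching sketch for SetTypeDefaultVersion, which changes the DefaultVersionId described above; the type name and version ID are placeholders, and Arn could be supplied instead of TypeName and Type:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := cloudformation.New(cfg)

	// Promote version "00000002" of a registered type to be its default.
	// The type name and version ID are placeholders.
	_, err = svc.SetTypeDefaultVersionRequest(&cloudformation.SetTypeDefaultVersionInput{
		Type:      cloudformation.RegistryType("RESOURCE"),
		TypeName:  aws.String("MyOrg::MyService::MyType"),
		VersionId: aws.String("00000002"),
	}).Send(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("default version updated")
}
```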

", + "TypeVersionSummary$VersionId": "

The ID of a specific version of the type. The version ID is the value at the end of the Amazon Resource Name (ARN) assigned to the type version when it is registered.

" + } + }, + "TypeVersionSummaries": { + "base": null, + "refs": { + "ListTypeVersionsOutput$TypeVersionSummaries": "

A list of TypeVersionSummary structures that contain information about the specified type's versions.

" + } + }, + "TypeVersionSummary": { + "base": "

Contains summary information about a specific version of a CloudFormation type.

", + "refs": { + "TypeVersionSummaries$member": null + } + }, "UpdateStackInput": { "base": "

The input for an UpdateStack action.

", "refs": { @@ -2055,6 +2460,13 @@ "refs": { "GetTemplateSummaryOutput$Version": "

The AWS template format version, which identifies the capabilities of the template.

" } + }, + "Visibility": { + "base": null, + "refs": { + "DescribeTypeOutput$Visibility": "

The scope at which the type is visible and usable in CloudFormation operations.

Valid values include:

", + "ListTypesInput$Visibility": "

The scope at which the type is visible and usable in CloudFormation operations.

Valid values include:

" + } } } } diff --git a/models/apis/cloudformation/2010-05-15/paginators-1.json b/models/apis/cloudformation/2010-05-15/paginators-1.json index 51df4af1a13..fb1c4ff2699 100644 --- a/models/apis/cloudformation/2010-05-15/paginators-1.json +++ b/models/apis/cloudformation/2010-05-15/paginators-1.json @@ -37,6 +37,21 @@ "input_token": "NextToken", "output_token": "NextToken", "result_key": "StackSummaries" + }, + "ListTypeRegistrations": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "ListTypeVersions": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, + "ListTypes": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" } } } \ No newline at end of file diff --git a/models/apis/cloudformation/2010-05-15/waiters-2.json b/models/apis/cloudformation/2010-05-15/waiters-2.json index 01bb0986e4b..a8d91064666 100644 --- a/models/apis/cloudformation/2010-05-15/waiters-2.json +++ b/models/apis/cloudformation/2010-05-15/waiters-2.json @@ -232,6 +232,26 @@ "state": "failure" } ] + }, + "TypeRegistrationComplete": { + "delay": 30, + "operation": "DescribeTypeRegistration", + "maxAttempts": 120, + "description": "Wait until type registration is COMPLETE.", + "acceptors": [ + { + "argument": "ProgressStatus", + "expected": "COMPLETE", + "matcher": "path", + "state": "success" + }, + { + "argument": "ProgressStatus", + "expected": "FAILED", + "matcher": "path", + "state": "failure" + } + ] } } } diff --git a/models/apis/cloudsearch/2013-01-01/api-2.json b/models/apis/cloudsearch/2013-01-01/api-2.json index e17e3c67b30..32437bf2f0a 100644 --- a/models/apis/cloudsearch/2013-01-01/api-2.json +++ b/models/apis/cloudsearch/2013-01-01/api-2.json @@ -3,11 +3,12 @@ "metadata":{ "apiVersion":"2013-01-01", "endpointPrefix":"cloudsearch", + "protocol":"query", "serviceFullName":"Amazon CloudSearch", + "serviceId":"CloudSearch", "signatureVersion":"v4", - "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2013-01-01/", - "protocol":"query", - "uid":"cloudsearch-2013-01-01" + "uid":"cloudsearch-2013-01-01", + "xmlNamespace":"http://cloudsearch.amazonaws.com/doc/2013-01-01/" }, "operations":{ "BuildSuggesters":{ @@ -22,27 +23,9 @@ "resultWrapper":"BuildSuggestersResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "CreateDomain":{ @@ -57,27 +40,9 @@ "resultWrapper":"CreateDomainResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"} ] }, "DefineAnalysisScheme":{ @@ -92,45 +57,11 @@ "resultWrapper":"DefineAnalysisSchemeResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - 
"httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DefineExpression":{ @@ -145,45 +76,11 @@ "resultWrapper":"DefineExpressionResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DefineIndexField":{ @@ -198,45 +95,11 @@ "resultWrapper":"DefineIndexFieldResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DefineSuggester":{ @@ -251,45 +114,11 @@ "resultWrapper":"DefineSuggesterResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DeleteAnalysisScheme":{ @@ -304,36 +133,10 @@ "resultWrapper":"DeleteAnalysisSchemeResult" }, "errors":[ - { 
- "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DeleteDomain":{ @@ -348,18 +151,8 @@ "resultWrapper":"DeleteDomainResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"} ] }, "DeleteExpression":{ @@ -374,36 +167,10 @@ "resultWrapper":"DeleteExpressionResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DeleteIndexField":{ @@ -418,36 +185,10 @@ "resultWrapper":"DeleteIndexFieldResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DeleteSuggester":{ @@ -462,36 +203,10 @@ "resultWrapper":"DeleteSuggesterResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeAnalysisSchemes":{ @@ -506,27 +221,9 @@ "resultWrapper":"DescribeAnalysisSchemesResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - 
"httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeAvailabilityOptions":{ @@ -541,54 +238,31 @@ "resultWrapper":"DescribeAvailabilityOptionsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"DisabledOperationException", - "error":{ - "code":"DisabledAction", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} + ] + }, + "DescribeDomainEndpointOptions":{ + "name":"DescribeDomainEndpointOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainEndpointOptionsRequest"}, + "output":{ + "shape":"DescribeDomainEndpointOptionsResponse", + "resultWrapper":"DescribeDomainEndpointOptionsResult" + }, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"} ] }, "DescribeDomains":{ @@ -603,18 +277,8 @@ "resultWrapper":"DescribeDomainsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"} ] }, "DescribeExpressions":{ @@ -629,27 +293,9 @@ "resultWrapper":"DescribeExpressionsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeIndexFields":{ @@ -664,27 +310,9 @@ "resultWrapper":"DescribeIndexFieldsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeScalingParameters":{ @@ -699,27 +327,9 @@ "resultWrapper":"DescribeScalingParametersResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - 
"httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeServiceAccessPolicies":{ @@ -734,27 +344,9 @@ "resultWrapper":"DescribeServiceAccessPoliciesResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "DescribeSuggesters":{ @@ -769,27 +361,9 @@ "resultWrapper":"DescribeSuggestersResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "IndexDocuments":{ @@ -804,27 +378,9 @@ "resultWrapper":"IndexDocumentsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"} ] }, "ListDomainNames":{ @@ -838,10 +394,7 @@ "resultWrapper":"ListDomainNamesResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - } + {"shape":"BaseException"} ] }, "UpdateAvailabilityOptions":{ @@ -856,54 +409,34 @@ "resultWrapper":"UpdateAvailabilityOptionsResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"DisabledOperationException", - "error":{ - "code":"DisabledAction", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"} + ] + }, + "UpdateDomainEndpointOptions":{ + "name":"UpdateDomainEndpointOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainEndpointOptionsRequest"}, + "output":{ + 
"shape":"UpdateDomainEndpointOptionsResponse", + "resultWrapper":"UpdateDomainEndpointOptionsResult" + }, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"InvalidTypeException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DisabledOperationException"}, + {"shape":"ValidationException"} ] }, "UpdateScalingParameters":{ @@ -918,45 +451,11 @@ "resultWrapper":"UpdateScalingParametersResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidTypeException"} ] }, "UpdateServiceAccessPolicies":{ @@ -971,45 +470,11 @@ "resultWrapper":"UpdateServiceAccessPoliciesResult" }, "errors":[ - { - "shape":"BaseException", - "exception":true - }, - { - "shape":"InternalException", - "error":{ - "code":"InternalException", - "httpStatusCode":500 - }, - "exception":true - }, - { - "shape":"LimitExceededException", - "error":{ - "code":"LimitExceeded", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"ResourceNotFoundException", - "error":{ - "code":"ResourceNotFound", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - { - "shape":"InvalidTypeException", - "error":{ - "code":"InvalidType", - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - } + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidTypeException"} ] } }, @@ -1367,6 +832,20 @@ "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} } }, + "DescribeDomainEndpointOptionsRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{"shape":"DomainName"}, + "Deployed":{"shape":"Boolean"} + } + }, + "DescribeDomainEndpointOptionsResponse":{ + "type":"structure", + "members":{ + "DomainEndpointOptions":{"shape":"DomainEndpointOptionsStatus"} + } + }, "DescribeDomainsRequest":{ "type":"structure", "members":{ @@ -1477,15 +956,33 @@ "SortExpression":{"shape":"String"} } }, + "DomainEndpointOptions":{ + "type":"structure", + "members":{ + "EnforceHTTPS":{"shape":"Boolean"}, + "TLSSecurityPolicy":{"shape":"TLSSecurityPolicy"} + } + }, + "DomainEndpointOptionsStatus":{ + "type":"structure", + "required":[ + "Options", + "Status" + ], + "members":{ + "Options":{"shape":"DomainEndpointOptions"}, + "Status":{"shape":"OptionStatus"} + } + }, "DomainId":{ "type":"string", - "min":1, - "max":64 + "max":64, + "min":1 }, "DomainName":{ "type":"string", - "min":3, "max":28, + "min":3, "pattern":"[a-z][a-z0-9\\-]+" }, "DomainNameList":{ @@ -1548,8 +1045,8 @@ }, "DynamicFieldName":{ "type":"string", - "min":1, "max":64, + "min":1, "pattern":"([a-z][a-z0-9_]*\\*?|\\*[a-z0-9_]*)" }, "DynamicFieldNameList":{ @@ -1586,13 
+1083,13 @@ }, "ExpressionValue":{ "type":"string", - "min":1, - "max":10240 + "max":10240, + "min":1 }, "FieldName":{ "type":"string", - "min":1, "max":64, + "min":1, "pattern":"[a-z][a-z0-9_]*" }, "FieldNameCommaList":{ @@ -1605,8 +1102,8 @@ }, "FieldValue":{ "type":"string", - "min":0, - "max":1024 + "max":1024, + "min":0 }, "IndexDocumentsRequest":{ "type":"structure", @@ -1872,8 +1369,8 @@ "ServiceUrl":{"type":"string"}, "StandardName":{ "type":"string", - "min":1, "max":64, + "min":1, "pattern":"[a-z][a-z0-9_]*" }, "StandardNameList":{ @@ -1915,6 +1412,13 @@ "type":"list", "member":{"shape":"SuggesterStatus"} }, + "TLSSecurityPolicy":{ + "type":"string", + "enum":[ + "Policy-Min-TLS-1-0-2019-07", + "Policy-Min-TLS-1-2-2019-07" + ] + }, "TextArrayOptions":{ "type":"structure", "members":{ @@ -1957,6 +1461,23 @@ "AvailabilityOptions":{"shape":"AvailabilityOptionsStatus"} } }, + "UpdateDomainEndpointOptionsRequest":{ + "type":"structure", + "required":[ + "DomainName", + "DomainEndpointOptions" + ], + "members":{ + "DomainName":{"shape":"DomainName"}, + "DomainEndpointOptions":{"shape":"DomainEndpointOptions"} + } + }, + "UpdateDomainEndpointOptionsResponse":{ + "type":"structure", + "members":{ + "DomainEndpointOptions":{"shape":"DomainEndpointOptionsStatus"} + } + }, "UpdateScalingParametersRequest":{ "type":"structure", "required":[ @@ -1994,6 +1515,12 @@ } }, "UpdateTimestamp":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "Word":{ "type":"string", "pattern":"[\\S]+" diff --git a/models/apis/cloudsearch/2013-01-01/docs-2.json b/models/apis/cloudsearch/2013-01-01/docs-2.json index a4b126a7c25..1de25c814ad 100644 --- a/models/apis/cloudsearch/2013-01-01/docs-2.json +++ b/models/apis/cloudsearch/2013-01-01/docs-2.json @@ -1,5 +1,6 @@ { "version": "2.0", + "service": "Amazon CloudSearch Configuration Service

You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action.

The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.

", "operations": { "BuildSuggesters": "

Indexes the search suggestions. For more information, see Configuring Suggesters in the Amazon CloudSearch Developer Guide.

", "CreateDomain": "

Creates a new search domain. For more information, see Creating a Search Domain in the Amazon CloudSearch Developer Guide.

", @@ -14,6 +15,7 @@ "DeleteSuggester": "

Deletes a suggester. For more information, see Getting Search Suggestions in the Amazon CloudSearch Developer Guide.

", "DescribeAnalysisSchemes": "

Gets the analysis schemes configured for a domain. An analysis scheme defines language-specific text processing options for a text field. Can be limited to specific analysis schemes by name. By default, shows all analysis schemes and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Analysis Schemes in the Amazon CloudSearch Developer Guide.

", "DescribeAvailabilityOptions": "

Gets the availability options configured for a domain. By default, shows the configuration with any pending changes. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.

", + "DescribeDomainEndpointOptions": "

Returns the domain's endpoint options, specifically whether all requests to the domain must arrive over HTTPS. For more information, see Configuring Domain Endpoint Options in the Amazon CloudSearch Developer Guide.

", "DescribeDomains": "

Gets information about the search domains owned by this account. Can be limited to specific domains. Shows all domains by default. To get the number of searchable documents in a domain, use the console or submit a matchall request to your domain's search endpoint: q=matchall&q.parser=structured&size=0. For more information, see Getting Information about a Search Domain in the Amazon CloudSearch Developer Guide.

", "DescribeExpressions": "

Gets the expressions configured for the search domain. Can be limited to specific expressions by name. By default, shows all expressions and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Configuring Expressions in the Amazon CloudSearch Developer Guide.

", "DescribeIndexFields": "

Gets information about the index fields configured for the search domain. Can be limited to specific fields by name. By default, shows all fields and includes any pending changes to the configuration. Set the Deployed option to true to show the active configuration and exclude pending changes. For more information, see Getting Domain Information in the Amazon CloudSearch Developer Guide.

", @@ -23,10 +25,10 @@ "IndexDocuments": "

Tells the search domain to start indexing its documents using the latest indexing options. This operation must be invoked to activate options whose OptionStatus is RequiresIndexDocuments.

", "ListDomainNames": "

Lists all search domains owned by an account.

", "UpdateAvailabilityOptions": "

Configures the availability options for a domain. Enabling the Multi-AZ option expands an Amazon CloudSearch domain to an additional Availability Zone in the same Region to increase fault tolerance in the event of a service disruption. Changes to the Multi-AZ option can take about half an hour to become active. For more information, see Configuring Availability Options in the Amazon CloudSearch Developer Guide.

", + "UpdateDomainEndpointOptions": "

Updates the domain's endpoint options, specifically whether all requests to the domain must arrive over HTTPS. For more information, see Configuring Domain Endpoint Options in the Amazon CloudSearch Developer Guide.

", "UpdateScalingParameters": "

Configures scaling parameters for a domain. A domain's scaling parameters specify the desired search instance type and replication count. Amazon CloudSearch will still automatically scale your domain based on the volume of data and traffic, but not below the desired instance type and replication count. If the Multi-AZ option is enabled, these values control the resources used per Availability Zone. For more information, see Configuring Scaling Options in the Amazon CloudSearch Developer Guide.

", "UpdateServiceAccessPolicies": "

Configures the access rules that control access to the domain's document and search endpoints. For more information, see Configuring Access for an Amazon CloudSearch Domain.

" }, - "service": "Amazon CloudSearch Configuration Service

You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action.

The endpoint for configuration service requests is region-specific: cloudsearch.region.amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see Regions and Endpoints.

", "shapes": { "APIVersion": { "base": "

The Amazon CloudSearch API version for a domain: 2011-02-01 or 2013-01-01.

", @@ -110,10 +112,12 @@ "DateOptions$SortEnabled": "

Whether the field can be used to sort the search results.

", "DescribeAnalysisSchemesRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", "DescribeAvailabilityOptionsRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", + "DescribeDomainEndpointOptionsRequest$Deployed": "

Whether to retrieve the latest configuration (which might be in a Processing state) or the current, active configuration. Defaults to false.

", "DescribeExpressionsRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", "DescribeIndexFieldsRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", "DescribeServiceAccessPoliciesRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", "DescribeSuggestersRequest$Deployed": "

Whether to display the deployed configuration (true) or include any pending changes (false). Defaults to false.

", + "DomainEndpointOptions$EnforceHTTPS": "

Whether all requests to the domain must arrive over HTTPS.

", "DomainStatus$Created": "

True if the search domain is created. It can take several minutes to initialize a domain when CreateDomain is called. Newly created search domains are returned from DescribeDomains with a false value for Created until domain creation is complete.

", "DomainStatus$Deleted": "

True if the search domain has been deleted. The system must clean up resources dedicated to the search domain when DeleteDomain is called. Newly deleted search domains are returned from DescribeDomains with a true value for IsDeleted for several minutes until resource cleanup is complete.

", "DomainStatus$RequiresIndexDocuments": "

True if IndexDocuments needs to be called to activate the current domain configuration.

", @@ -294,6 +298,16 @@ "refs": { } }, + "DescribeDomainEndpointOptionsRequest": { + "base": "

Container for the parameters to the DescribeDomainEndpointOptions operation. Specify the name of the domain you want to describe. To show the active configuration and exclude any pending changes, set the Deployed option to true.

", + "refs": { + } + }, + "DescribeDomainEndpointOptionsResponse": { + "base": "

The result of a DescribeDomainEndpointOptions request. Contains the status and configuration of a search domain's endpoint options.

", + "refs": { + } + }, "DescribeDomainsRequest": { "base": "

Container for the parameters to the DescribeDomains operation. By default shows the status of all domains. To restrict the response to particular domains, specify the names of the domains you want to describe.

", "refs": { @@ -365,6 +379,20 @@ "Suggester$DocumentSuggesterOptions": null } }, + "DomainEndpointOptions": { + "base": "

The domain's endpoint options.

", + "refs": { + "DomainEndpointOptionsStatus$Options": "

The domain endpoint options configured for the domain.

", + "UpdateDomainEndpointOptionsRequest$DomainEndpointOptions": "

Whether to require that all requests to the domain arrive over HTTPS. We recommend Policy-Min-TLS-1-2-2019-07 for TLSSecurityPolicy. For compatibility with older clients, the default is Policy-Min-TLS-1-0-2019-07.

" + } + }, + "DomainEndpointOptionsStatus": { + "base": "

The configuration and status of the domain's endpoint options.

", + "refs": { + "DescribeDomainEndpointOptionsResponse$DomainEndpointOptions": "

The status and configuration of a search domain's endpoint options.

", + "UpdateDomainEndpointOptionsResponse$DomainEndpointOptions": "

The newly configured domain endpoint options.

" + } + }, "DomainId": { "base": "

An internally generated unique identifier for a domain.

", "refs": { @@ -387,6 +415,7 @@ "DeleteSuggesterRequest$DomainName": null, "DescribeAnalysisSchemesRequest$DomainName": "

The name of the domain you want to describe.

", "DescribeAvailabilityOptionsRequest$DomainName": "

The name of the domain you want to describe.

", + "DescribeDomainEndpointOptionsRequest$DomainName": "

A string that represents the name of a domain.

", "DescribeExpressionsRequest$DomainName": "

The name of the domain you want to describe.

", "DescribeIndexFieldsRequest$DomainName": "

The name of the domain you want to describe.

", "DescribeScalingParametersRequest$DomainName": null, @@ -397,6 +426,7 @@ "DomainStatus$DomainName": null, "IndexDocumentsRequest$DomainName": null, "UpdateAvailabilityOptionsRequest$DomainName": null, + "UpdateDomainEndpointOptionsRequest$DomainName": "

A string that represents the name of a domain.

", "UpdateScalingParametersRequest$DomainName": null, "UpdateServiceAccessPoliciesRequest$DomainName": null } @@ -668,7 +698,7 @@ "OptionState": { "base": "

The state of processing a change to an option. One of:

", "refs": { - "OptionStatus$State": "

The state of processing a change to an option. Possible values:

" + "OptionStatus$State": "

The state of processing a change to an option. Possible values:

" } }, "OptionStatus": { @@ -677,6 +707,7 @@ "AccessPoliciesStatus$Status": null, "AnalysisSchemeStatus$Status": null, "AvailabilityOptionsStatus$Status": null, + "DomainEndpointOptionsStatus$Status": "

The status of the configured domain endpoint options.

", "ExpressionStatus$Status": null, "IndexFieldStatus$Status": null, "ScalingParametersStatus$Status": null, @@ -797,6 +828,12 @@ "DescribeSuggestersResponse$Suggesters": "

The suggesters configured for the domain specified in the request.

" } }, + "TLSSecurityPolicy": { + "base": "

The minimum required TLS version.

", + "refs": { + "DomainEndpointOptions$TLSSecurityPolicy": "

The minimum required TLS version.

" + } + }, "TextArrayOptions": { "base": "

Options for a field that contains an array of text strings. Present if IndexFieldType specifies the field is of type text-array. A text-array field is always searchable. All options are enabled by default.

", "refs": { @@ -827,6 +864,16 @@ "refs": { } }, + "UpdateDomainEndpointOptionsRequest": { + "base": "

Container for the parameters to the UpdateDomainEndpointOptions operation. Specifies the name of the domain you want to update and the domain endpoint options.

", + "refs": { + } + }, + "UpdateDomainEndpointOptionsResponse": { + "base": "

The result of an UpdateDomainEndpointOptions request. Contains the configuration and status of the domain's endpoint options.

", + "refs": { + } + }, "UpdateScalingParametersRequest": { "base": "

Container for the parameters to the UpdateScalingParameters operation. Specifies the name of the domain you want to update and the scaling parameters you want to configure.

", "refs": { @@ -854,6 +901,11 @@ "OptionStatus$UpdateDate": "

A timestamp for when this option was last updated.

" } }, + "ValidationException": { + "base": "

The request was rejected because it has invalid parameters.

", + "refs": { + } + }, "Word": { "base": null, "refs": { diff --git a/models/apis/cloudsearch/2013-01-01/examples-1.json b/models/apis/cloudsearch/2013-01-01/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/cloudsearch/2013-01-01/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/cloudsearch/2013-01-01/paginators-1.json b/models/apis/cloudsearch/2013-01-01/paginators-1.json index 82fa804ab75..fc7e95d086e 100644 --- a/models/apis/cloudsearch/2013-01-01/paginators-1.json +++ b/models/apis/cloudsearch/2013-01-01/paginators-1.json @@ -16,5 +16,4 @@ "result_key": "Suggesters" } } -} - +} \ No newline at end of file diff --git a/models/apis/cloudsearch/2013-01-01/smoke.json b/models/apis/cloudsearch/2013-01-01/smoke.json new file mode 100644 index 00000000000..04457b247af --- /dev/null +++ b/models/apis/cloudsearch/2013-01-01/smoke.json @@ -0,0 +1,18 @@ +{ + "version": 1, + "defaultRegion": "us-west-2", + "testCases": [ + { + "operationName": "DescribeDomains", + "input": {}, + "errorExpectedFromService": false + }, + { + "operationName": "DescribeIndexFields", + "input": { + "DomainName": "fakedomain" + }, + "errorExpectedFromService": true + } + ] +} diff --git a/models/apis/cloudtrail/2013-11-01/api-2.json b/models/apis/cloudtrail/2013-11-01/api-2.json index f6afa05526c..da8b8d47ae9 100644 --- a/models/apis/cloudtrail/2013-11-01/api-2.json +++ b/models/apis/cloudtrail/2013-11-01/api-2.json @@ -102,7 +102,8 @@ "output":{"shape":"DescribeTrailsResponse"}, "errors":[ {"shape":"UnsupportedOperationException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"InvalidTrailNameException"} ], "idempotent":true }, @@ -122,6 +123,23 @@ ], "idempotent":true }, + "GetInsightSelectors":{ + "name":"GetInsightSelectors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInsightSelectorsRequest"}, + "output":{"shape":"GetInsightSelectorsResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"InsightNotEnabledException"} + ], + "idempotent":true + }, "GetTrail":{ "name":"GetTrail", "http":{ @@ -148,7 +166,9 @@ "output":{"shape":"GetTrailStatusResponse"}, "errors":[ {"shape":"TrailNotFoundException"}, - {"shape":"InvalidTrailNameException"} + {"shape":"InvalidTrailNameException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} ], "idempotent":true }, @@ -213,7 +233,10 @@ {"shape":"InvalidLookupAttributesException"}, {"shape":"InvalidTimeRangeException"}, {"shape":"InvalidMaxResultsException"}, - {"shape":"InvalidNextTokenException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidEventCategoryException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"} ], "idempotent":true }, @@ -237,6 +260,27 @@ ], "idempotent":true }, + "PutInsightSelectors":{ + "name":"PutInsightSelectors", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutInsightSelectorsRequest"}, + "output":{"shape":"PutInsightSelectorsResponse"}, + "errors":[ + {"shape":"TrailNotFoundException"}, + {"shape":"InvalidTrailNameException"}, + {"shape":"InvalidHomeRegionException"}, + {"shape":"InvalidInsightSelectorsException"}, + {"shape":"InsufficientS3BucketPolicyException"}, + 
{"shape":"InsufficientEncryptionPolicyException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"NotOrganizationMasterAccountException"} + ], + "idempotent":true + }, "RemoveTags":{ "name":"RemoveTags", "http":{ @@ -315,6 +359,7 @@ {"shape":"InvalidKmsKeyIdException"}, {"shape":"InvalidTrailNameException"}, {"shape":"TrailNotProvidedException"}, + {"shape":"InvalidEventSelectorsException"}, {"shape":"InvalidParameterCombinationException"}, {"shape":"InvalidHomeRegionException"}, {"shape":"KmsKeyNotFoundException"}, @@ -465,12 +510,17 @@ "CloudTrailEvent":{"shape":"String"} } }, + "EventCategory":{ + "type":"string", + "enum":["insight"] + }, "EventSelector":{ "type":"structure", "members":{ "ReadWriteType":{"shape":"ReadWriteType"}, "IncludeManagementEvents":{"shape":"Boolean"}, - "DataResources":{"shape":"DataResources"} + "DataResources":{"shape":"DataResources"}, + "ExcludeManagementEventSources":{"shape":"ExcludeManagementEventSources"} } }, "EventSelectors":{ @@ -481,6 +531,10 @@ "type":"list", "member":{"shape":"Event"} }, + "ExcludeManagementEventSources":{ + "type":"list", + "member":{"shape":"String"} + }, "GetEventSelectorsRequest":{ "type":"structure", "required":["TrailName"], @@ -495,6 +549,20 @@ "EventSelectors":{"shape":"EventSelectors"} } }, + "GetInsightSelectorsRequest":{ + "type":"structure", + "required":["TrailName"], + "members":{ + "TrailName":{"shape":"String"} + } + }, + "GetInsightSelectorsResponse":{ + "type":"structure", + "members":{ + "TrailARN":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, "GetTrailRequest":{ "type":"structure", "required":["Name"], @@ -537,6 +605,26 @@ "TimeLoggingStopped":{"shape":"String"} } }, + "InsightNotEnabledException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InsightSelector":{ + "type":"structure", + "members":{ + "InsightType":{"shape":"InsightType"} + } + }, + "InsightSelectors":{ + "type":"list", + "member":{"shape":"InsightSelector"} + }, + "InsightType":{ + "type":"string", + "enum":["ApiCallRateInsight"] + }, "InsufficientDependencyServiceAccessPermissionException":{ "type":"structure", "members":{ @@ -573,6 +661,12 @@ }, "exception":true }, + "InvalidEventCategoryException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidEventSelectorsException":{ "type":"structure", "members":{ @@ -585,6 +679,12 @@ }, "exception":true }, + "InvalidInsightSelectorsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidKmsKeyIdException":{ "type":"structure", "members":{ @@ -753,6 +853,7 @@ "LookupAttributes":{"shape":"LookupAttributesList"}, "StartTime":{"shape":"Date"}, "EndTime":{"shape":"Date"}, + "EventCategory":{"shape":"EventCategory"}, "MaxResults":{"shape":"MaxResults"}, "NextToken":{"shape":"NextToken"} } @@ -831,6 +932,24 @@ "EventSelectors":{"shape":"EventSelectors"} } }, + "PutInsightSelectorsRequest":{ + "type":"structure", + "required":[ + "TrailName", + "InsightSelectors" + ], + "members":{ + "TrailName":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, + "PutInsightSelectorsResponse":{ + "type":"structure", + "members":{ + "TrailARN":{"shape":"String"}, + "InsightSelectors":{"shape":"InsightSelectors"} + } + }, "ReadWriteType":{ "type":"string", "enum":[ @@ -959,6 +1078,7 @@ "CloudWatchLogsRoleArn":{"shape":"String"}, "KmsKeyId":{"shape":"String"}, "HasCustomEventSelectors":{"shape":"Boolean"}, + 
"HasInsightSelectors":{"shape":"Boolean"}, "IsOrganizationTrail":{"shape":"Boolean"} } }, diff --git a/models/apis/cloudtrail/2013-11-01/docs-2.json b/models/apis/cloudtrail/2013-11-01/docs-2.json index 65d2ca4fa43..548cf6702d0 100644 --- a/models/apis/cloudtrail/2013-11-01/docs-2.json +++ b/models/apis/cloudtrail/2013-11-01/docs-2.json @@ -7,13 +7,15 @@ "DeleteTrail": "

Deletes a trail. This operation must be called from the region in which the trail was created. DeleteTrail cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

", "DescribeTrails": "

Retrieves settings for one or more trails associated with the current region for your account.

", "GetEventSelectors": "

Describes the settings for the event selectors that you configured for your trail. The information returned for your event selectors includes the following:

For more information, see Logging Data and Management Events for Trails in the AWS CloudTrail User Guide.

", + "GetInsightSelectors": "

Describes the settings for the Insights event selectors that you configured for your trail. GetInsightSelectors shows whether CloudTrail Insights event logging is enabled on the trail and, if it is, which insight types are enabled. If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

For more information, see Logging CloudTrail Insights Events for Trails in the AWS CloudTrail User Guide.

", "GetTrail": "

Returns settings information for a specified trail.

", "GetTrailStatus": "

Returns a JSON-formatted list of information about the specified trail. Fields include information on delivery errors, Amazon SNS and Amazon S3 errors, and start and stop logging times for each trail. This operation returns trail status from a single region. To return trail status from all regions, you must call the operation on each region.

", "ListPublicKeys": "

Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

CloudTrail uses different private/public key pairs per region. Each digest file is signed with a private key unique to its region. Therefore, when you validate a digest file from a particular region, you must look in the same region for its corresponding public key.

", "ListTags": "

Lists the tags for the trail in the current region.

", "ListTrails": "

Lists trails that are in the current account.

", - "LookupEvents": "

Looks up management events captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes:

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to one per second per account. If this limit is exceeded, a throttling error occurs.

Events that occurred during the selected time range will not be available for lookup if CloudTrail logging was not enabled when the events occurred.

", + "LookupEvents": "

Looks up management events or CloudTrail Insights events that are captured by CloudTrail. You can look up events that occurred in a region within the last 90 days. Lookup supports the following attributes for management events:

Lookup supports the following attributes for Insights events:

All attributes are optional. The default number of results returned is 50, with a maximum of 50 possible. The response includes a token that you can use to get the next page of results.

The rate of lookup requests is limited to two per second per account. If this limit is exceeded, a throttling error occurs.
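For illustration only, a sketch of an Insights lookup through this SDK, assuming the v2 request/Send pattern and the aws helper functions; the EventCategory value "insight" comes from the enum added above, and the output handling is limited to the CloudTrailEvent field documented in this file:

```go
// Hypothetical sketch: look up CloudTrail Insights events by setting the
// new EventCategory field to "insight". Assumes the v2 request/Send pattern.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cloudtrail.New(cfg)

	req := svc.LookupEventsRequest(&cloudtrail.LookupEventsInput{
		EventCategory: cloudtrail.EventCategory("insight"), // only Insights events are returned
		MaxResults:    aws.Int64(50),                       // 50 is also the documented default
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Events {
		log.Println(aws.StringValue(ev.CloudTrailEvent)) // raw JSON for each event
	}
}
```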

", "PutEventSelectors": "

Configures an event selector for your trail. Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. The GetConsoleOutput is a read-only event but it doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the region in which the trail was created; otherwise, an InvalidHomeRegionException is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging Data and Management Events for Trails and Limits in AWS CloudTrail in the AWS CloudTrail User Guide.

", + "PutInsightSelectors": "

Lets you enable Insights event logging by specifying the Insights selectors that you want to enable on an existing trail. You also use PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. In this release, only ApiCallRateInsight is supported as an Insights selector.
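A hedged sketch of enabling the single supported insight type on an existing trail, under the same assumptions as the previous example (v2 request/Send pattern, generated shape names from this diff); the trail name is a placeholder:

```go
// Hypothetical sketch: turn on CloudTrail Insights (ApiCallRateInsight) for an
// existing trail. Passing an empty InsightSelectors slice would turn it off.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cloudtrail.New(cfg)

	req := svc.PutInsightSelectorsRequest(&cloudtrail.PutInsightSelectorsInput{
		TrailName: aws.String("management-events-trail"), // placeholder trail name
		InsightSelectors: []cloudtrail.InsightSelector{
			{InsightType: cloudtrail.InsightType("ApiCallRateInsight")},
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("insights enabled on:", aws.StringValue(resp.TrailARN))
}
```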

", "RemoveTags": "

Removes the specified tags from a trail.

", "StartLogging": "

Starts the recording of AWS API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

", "StopLogging": "

Suspends the recording of AWS API calls and log file delivery for the specified trail. Under most circumstances, there is no need to use this action. You can update a trail without stopping it first. This action is the only way to stop recording. For a trail enabled in all regions, this operation must be called from the region in which the trail was created, or an InvalidHomeRegionException will occur. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail enabled in all regions.

", @@ -48,6 +50,7 @@ "Trail$IsMultiRegionTrail": "

Specifies whether the trail exists only in one region or exists in all regions.

", "Trail$LogFileValidationEnabled": "

Specifies whether log file validation is enabled.

", "Trail$HasCustomEventSelectors": "

Specifies if the trail has custom event selectors.

", + "Trail$HasInsightSelectors": "

Specifies whether a trail has insight types specified in an InsightSelector list.

", "Trail$IsOrganizationTrail": "

Specifies whether the trail is an organization trail.

", "UpdateTrailRequest$IncludeGlobalServiceEvents": "

Specifies whether the trail is publishing events from global services such as IAM to the log files.

", "UpdateTrailRequest$IsMultiRegionTrail": "

Specifies whether the trail applies only to the current region or to all regions. The default is false. If the trail exists only in the current region and this value is set to true, shadow trails (replications of the trail) will be created in the other regions. If the trail exists in all regions and this value is set to false, the trail will remain in the region where it was created, and its shadow trails in other regions will be deleted. As a best practice, consider using trails that log events in all regions.

", @@ -91,7 +94,7 @@ } }, "DataResource": { - "base": "

The Amazon S3 buckets or AWS Lambda functions that you specify in your event selectors for your trail to log data events. Data events provide insight into the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named MyLambdaFunction, but not for all AWS Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is an AWS Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is an AWS Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.

", + "base": "

The Amazon S3 buckets or AWS Lambda functions that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of AWS Lambda data events for a Lambda function named MyLambdaFunction, but not for all AWS Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is an AWS Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is an AWS Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.

", "refs": { "DataResources$member": null } @@ -152,6 +155,12 @@ "EventsList$member": null } }, + "EventCategory": { + "base": null, + "refs": { + "LookupEventsRequest$EventCategory": "

Specifies the event category. If you do not specify an event category, events of that category are not returned in the response. For example, if you do not specify insight as the value of EventCategory, no Insights events are returned.

" + } + }, "EventSelector": { "base": "

Use event selectors to further specify the management and data event settings for your trail. By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

You can configure up to five event selectors for a trail.

", "refs": { @@ -172,6 +181,12 @@ "LookupEventsResponse$Events": "

A list of events returned based on the lookup attributes specified and the CloudTrail event. The events list is sorted by time. The most recent event is listed first.

" } }, + "ExcludeManagementEventSources": { + "base": null, + "refs": { + "EventSelector$ExcludeManagementEventSources": "

An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service events by containing \"kms.amazonaws.com\". By default, ExcludeManagementEventSources is empty, and AWS KMS events are included in events that are logged to your trail.
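As a sketch of how this filter might be set (same assumptions as the earlier CloudTrail examples: v2 request/Send pattern, generated field names, placeholder trail name), an event selector that keeps management events but excludes AWS KMS sources could look like this:

```go
// Hypothetical sketch: log all management events except those from AWS KMS by
// using the new ExcludeManagementEventSources list on an event selector.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cloudtrail.New(cfg)

	req := svc.PutEventSelectorsRequest(&cloudtrail.PutEventSelectorsInput{
		TrailName: aws.String("management-events-trail"), // placeholder trail name
		EventSelectors: []cloudtrail.EventSelector{
			{
				ReadWriteType:                 cloudtrail.ReadWriteType("All"),
				IncludeManagementEvents:       aws.Bool(true),
				ExcludeManagementEventSources: []string{"kms.amazonaws.com"},
			},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
	log.Println("event selectors updated")
}
```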

" + } + }, "GetEventSelectorsRequest": { "base": null, "refs": { @@ -182,6 +197,16 @@ "refs": { } }, + "GetInsightSelectorsRequest": { + "base": null, + "refs": { + } + }, + "GetInsightSelectorsResponse": { + "base": null, + "refs": { + } + }, "GetTrailRequest": { "base": null, "refs": { @@ -202,6 +227,31 @@ "refs": { } }, + "InsightNotEnabledException": { + "base": "

If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

", + "refs": { + } + }, + "InsightSelector": { + "base": "

A JSON string that contains a list of insight types that are logged on a trail.

", + "refs": { + "InsightSelectors$member": null + } + }, + "InsightSelectors": { + "base": null, + "refs": { + "GetInsightSelectorsResponse$InsightSelectors": "

A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

", + "PutInsightSelectorsRequest$InsightSelectors": "

A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

", + "PutInsightSelectorsResponse$InsightSelectors": "

A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

" + } + }, + "InsightType": { + "base": null, + "refs": { + "InsightSelector$InsightType": "

The type of insights to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

" + } + }, "InsufficientDependencyServiceAccessPermissionException": { "base": "

This exception is thrown when the IAM user or role that is used to create the organization trail is lacking one or more required permissions for creating an organization trail in a required service. For more information, see Prepare For Creating a Trail For Your Organization.

", "refs": { @@ -232,6 +282,11 @@ "refs": { } }, + "InvalidEventCategoryException": { + "base": "

Occurs if an event category that is not valid is specified as a value of EventCategory.

", + "refs": { + } + }, "InvalidEventSelectorsException": { "base": "

This exception is thrown when the PutEventSelectors operation is called with a number of event selectors or data resources that is not valid. The combination of event selectors and data resources is not valid. A trail can have up to 5 event selectors. A trail is limited to 250 data resources. These data resources can be distributed across event selectors, but the overall total cannot exceed 250.

You can:

", "refs": { @@ -242,6 +297,11 @@ "refs": { } }, + "InvalidInsightSelectorsException": { + "base": "

The formatting or syntax of the InsightSelectors JSON statement in your PutInsightSelectors or GetInsightSelectors request is not valid, or the specified insight type in the InsightSelectors statement is not a valid insight type.

", + "refs": { + } + }, "InvalidKmsKeyIdException": { "base": "

This exception is thrown when the KMS key ARN is invalid.

", "refs": { @@ -435,6 +495,16 @@ "refs": { } }, + "PutInsightSelectorsRequest": { + "base": null, + "refs": { + } + }, + "PutInsightSelectorsResponse": { + "base": null, + "refs": { + } + }, "ReadWriteType": { "base": null, "refs": { @@ -546,8 +616,11 @@ "Event$EventSource": "

The AWS service that the request was made to.

", "Event$Username": "

A user name or role name of the requester that called the API in the event returned.

", "Event$CloudTrailEvent": "

A JSON string that contains a representation of the event returned.

", + "ExcludeManagementEventSources$member": null, "GetEventSelectorsRequest$TrailName": "

Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:

If you specify a trail ARN, it must be in the format:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", "GetEventSelectorsResponse$TrailARN": "

The specified trail ARN that has the event selectors.

", + "GetInsightSelectorsRequest$TrailName": "

Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:

If you specify a trail ARN, it must be in the format:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", + "GetInsightSelectorsResponse$TrailARN": "

The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.

", "GetTrailRequest$Name": "

The name or the Amazon Resource Name (ARN) of the trail for which you want to retrieve settings information.

", "GetTrailStatusRequest$Name": "

Specifies the name or the CloudTrail ARN of the trail for which you are requesting status. To get the status of a shadow trail (a replication of the trail in another region), you must specify its ARN. The format of a trail ARN is:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", "GetTrailStatusResponse$LatestDeliveryError": "

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information see the topic Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket and will not occur for timeouts. To resolve the issue, create a new bucket and call UpdateTrail to specify the new bucket, or fix the existing objects so that CloudTrail can again write to the bucket.

", @@ -564,12 +637,14 @@ "ListPublicKeysResponse$NextToken": "

Reserved for future use.

", "ListTagsRequest$NextToken": "

Reserved for future use.

", "ListTagsResponse$NextToken": "

Reserved for future use.

", - "ListTrailsRequest$NextToken": null, - "ListTrailsResponse$NextToken": null, + "ListTrailsRequest$NextToken": "

The token to use to get the next page of results after a previous API call. This token must be passed in with the same parameters that were specified in the original call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.

", + "ListTrailsResponse$NextToken": "

The token to use to get the next page of results after a previous API call. If the token does not appear, there are no more results to return. The token must be passed in with the same parameters as the previous call. For example, if the original call specified an AttributeKey of 'Username' with a value of 'root', the call with NextToken should include those same parameters.
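A short sketch of draining ListTrails with this token, under the same assumptions as the other CloudTrail sketches (request/Send pattern, promoted output fields); the Name field on the returned trail summaries is assumed from the service model rather than shown in this diff:

```go
// Hypothetical sketch: page through ListTrails until NextToken is no longer
// returned, passing the same (empty) parameters on every call.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cloudtrail.New(cfg)

	var next *string
	for {
		req := svc.ListTrailsRequest(&cloudtrail.ListTrailsInput{NextToken: next})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range resp.Trails {
			log.Println(aws.StringValue(t.Name)) // Name is assumed from the TrailInfo shape
		}
		if resp.NextToken == nil {
			break // no token means no more results
		}
		next = resp.NextToken
	}
}
```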

", "LookupAttribute$AttributeValue": "

Specifies a value for the specified AttributeKey.

", "PublicKey$Fingerprint": "

The fingerprint of the public key.

", "PutEventSelectorsRequest$TrailName": "

Specifies the name of the trail or trail ARN. If you specify a trail name, the string must meet the following requirements:

If you specify a trail ARN, it must be in the format:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", "PutEventSelectorsResponse$TrailARN": "

Specifies the ARN of the trail that was updated with event selectors. The format of a trail ARN is:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", + "PutInsightSelectorsRequest$TrailName": "

The name of the CloudTrail trail for which you want to change or add Insights selectors.

", + "PutInsightSelectorsResponse$TrailARN": "

The Amazon Resource Name (ARN) of a trail for which you want to change or add Insights selectors.

", "RemoveTagsRequest$ResourceId": "

Specifies the ARN of the trail from which tags should be removed. The format of a trail ARN is:

arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail

", "Resource$ResourceType": "

The type of a resource referenced by the event returned. When the resource type cannot be determined, null is returned. Some examples of resource types are: Instance for EC2, Trail for CloudTrail, DBInstance for RDS, and AccessKey for IAM. To learn more about how to look up and filter events by the resource types supported for a service, see Filtering CloudTrail Events.

", "Resource$ResourceName": "

The name of the resource referenced by the event returned. These are user-created names whose values will depend on the environment. For example, the resource name might be \"auto-scaling-test-group\" for an Auto Scaling Group or \"i-1234567\" for an EC2 Instance.

", @@ -652,7 +727,7 @@ "TrailList": { "base": null, "refs": { - "DescribeTrailsResponse$trailList": "

The list of trail objects.

" + "DescribeTrailsResponse$trailList": "

The list of trail objects. Trail objects with string values are only returned if values for the objects exist in a trail's configuration. For example, SNSTopicName and SNSTopicARN are only returned in results if a trail is configured to send SNS notifications. Similarly, KMSKeyId only appears in results if a trail's log files are encrypted with AWS KMS-managed keys.

" } }, "TrailNameList": { diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json index da06326d1d5..d317dae4748 100644 --- a/models/apis/codebuild/2016-10-06/api-2.json +++ b/models/apis/codebuild/2016-10-06/api-2.json @@ -465,7 +465,8 @@ "enum":[ "BUILD_GENERAL1_SMALL", "BUILD_GENERAL1_MEDIUM", - "BUILD_GENERAL1_LARGE" + "BUILD_GENERAL1_LARGE", + "BUILD_GENERAL1_2XLARGE" ] }, "CreateProjectInput":{ @@ -598,7 +599,9 @@ "type":"string", "enum":[ "WINDOWS_CONTAINER", - "LINUX_CONTAINER" + "LINUX_CONTAINER", + "LINUX_GPU_CONTAINER", + "ARM_CONTAINER" ] }, "EnvironmentVariable":{ diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json index b0e48150d99..df6d17a6cea 100644 --- a/models/apis/codebuild/2016-10-06/docs-2.json +++ b/models/apis/codebuild/2016-10-06/docs-2.json @@ -1,10 +1,10 @@ { "version": "2.0", - "service": "AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

AWS CodeBuild supports these operations:

", + "service": "AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

AWS CodeBuild supports these operations:

", "operations": { "BatchDeleteBuilds": "

Deletes one or more builds.

", - "BatchGetBuilds": "

Gets information about builds.

", - "BatchGetProjects": "

Gets information about build projects.

", + "BatchGetBuilds": "

Gets information about one or more builds.

", + "BatchGetProjects": "

Gets information about one or more build projects.

", "CreateProject": "

Creates a build project.

", "CreateWebhook": "

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

", "DeleteProject": "

Deletes a build project.

", @@ -181,7 +181,7 @@ "ComputeType": { "base": null, "refs": { - "ProjectEnvironment$computeType": "

Information about the compute resources the build project uses. Available values include:

For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.

", + "ProjectEnvironment$computeType": "

Information about the compute resources the build project uses. Available values include:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.
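Purely to illustrate the newly added enum values (the generated constant names are not shown in this diff, so raw strings converted to the generated string types are used, and the image name is illustrative), a build environment using the new ARM_CONTAINER type might be declared like this:

```go
// Hypothetical sketch: a ProjectEnvironment using the ARM_CONTAINER type added
// in this diff. Enum values are written as raw strings converted to the
// generated string types; the image name is illustrative only.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
)

func main() {
	env := codebuild.ProjectEnvironment{
		Type:        codebuild.EnvironmentType("ARM_CONTAINER"),
		ComputeType: codebuild.ComputeType("BUILD_GENERAL1_LARGE"),
		Image:       aws.String("aws/codebuild/amazonlinux2-aarch64-standard:1.0"),
	}
	fmt.Printf("%+v\n", env)
}
```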

", "StartBuildInput$computeTypeOverride": "

The name of a compute type for this build that overrides the one specified in the build project.

" } }, @@ -280,7 +280,7 @@ "EnvironmentType": { "base": null, "refs": { - "ProjectEnvironment$type": "

The type of build environment to use for related builds.

", + "ProjectEnvironment$type": "

The type of build environment to use for related builds.

", "StartBuildInput$environmentTypeOverride": "

A container type for this build that overrides the one specified in the build project.

" } }, diff --git a/models/apis/codecommit/2015-04-13/api-2.json b/models/apis/codecommit/2015-04-13/api-2.json index bc89b924f5a..64c3aa20d82 100644 --- a/models/apis/codecommit/2015-04-13/api-2.json +++ b/models/apis/codecommit/2015-04-13/api-2.json @@ -13,6 +13,49 @@ "uid":"codecommit-2015-04-13" }, "operations":{ + "AssociateApprovalRuleTemplateWithRepository":{ + "name":"AssociateApprovalRuleTemplateWithRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateApprovalRuleTemplateWithRepositoryInput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"MaximumRuleTemplatesAssociatedWithRepositoryException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "BatchAssociateApprovalRuleTemplateWithRepositories":{ + "name":"BatchAssociateApprovalRuleTemplateWithRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesInput"}, + "output":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNamesRequiredException"}, + {"shape":"MaximumRepositoryNamesExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "BatchDescribeMergeConflicts":{ "name":"BatchDescribeMergeConflicts", "http":{ @@ -45,6 +88,27 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "BatchDisassociateApprovalRuleTemplateFromRepositories":{ + "name":"BatchDisassociateApprovalRuleTemplateFromRepositories", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesInput"}, + "output":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNamesRequiredException"}, + {"shape":"MaximumRepositoryNamesExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "BatchGetCommits":{ "name":"BatchGetCommits", "http":{ @@ -85,6 +149,24 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "CreateApprovalRuleTemplate":{ + "name":"CreateApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateApprovalRuleTemplateInput"}, + "output":{"shape":"CreateApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + 
{"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameAlreadyExistsException"}, + {"shape":"ApprovalRuleTemplateContentRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateContentException"}, + {"shape":"InvalidApprovalRuleTemplateDescriptionException"}, + {"shape":"NumberOfRuleTemplatesExceededException"} + ] + }, "CreateBranch":{ "name":"CreateBranch", "http":{ @@ -194,6 +276,32 @@ {"shape":"SourceAndDestinationAreSameException"} ] }, + "CreatePullRequestApprovalRule":{ + "name":"CreatePullRequestApprovalRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePullRequestApprovalRuleInput"}, + "output":{"shape":"CreatePullRequestApprovalRuleOutput"}, + "errors":[ + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"ApprovalRuleNameAlreadyExistsException"}, + {"shape":"ApprovalRuleContentRequiredException"}, + {"shape":"InvalidApprovalRuleContentException"}, + {"shape":"NumberOfRulesExceededException"}, + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "CreateRepository":{ "name":"CreateRepository", "http":{ @@ -266,6 +374,20 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DeleteApprovalRuleTemplate":{ + "name":"DeleteApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteApprovalRuleTemplateInput"}, + "output":{"shape":"DeleteApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateInUseException"} + ] + }, "DeleteBranch":{ "name":"DeleteBranch", "http":{ @@ -336,6 +458,29 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DeletePullRequestApprovalRule":{ + "name":"DeletePullRequestApprovalRule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeletePullRequestApprovalRuleInput"}, + "output":{"shape":"DeletePullRequestApprovalRuleOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"CannotDeleteApprovalRuleFromTemplateException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "DeleteRepository":{ "name":"DeleteRepository", "http":{ @@ -412,6 +557,63 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "DisassociateApprovalRuleTemplateFromRepository":{ + "name":"DisassociateApprovalRuleTemplateFromRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateApprovalRuleTemplateFromRepositoryInput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + 
{"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "EvaluatePullRequestApprovalRules":{ + "name":"EvaluatePullRequestApprovalRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EvaluatePullRequestApprovalRulesInput"}, + "output":{"shape":"EvaluatePullRequestApprovalRulesOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetApprovalRuleTemplate":{ + "name":"GetApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApprovalRuleTemplateInput"}, + "output":{"shape":"GetApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"} + ] + }, "GetBlob":{ "name":"GetBlob", "http":{ @@ -725,6 +927,48 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "GetPullRequestApprovalStates":{ + "name":"GetPullRequestApprovalStates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPullRequestApprovalStatesInput"}, + "output":{"shape":"GetPullRequestApprovalStatesOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "GetPullRequestOverrideState":{ + "name":"GetPullRequestOverrideState", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetPullRequestOverrideStateInput"}, + "output":{"shape":"GetPullRequestOverrideStateOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "GetRepository":{ "name":"GetRepository", "http":{ @@ -763,6 +1007,40 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "ListApprovalRuleTemplates":{ + "name":"ListApprovalRuleTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListApprovalRuleTemplatesInput"}, + 
"output":{"shape":"ListApprovalRuleTemplatesOutput"}, + "errors":[ + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"} + ] + }, + "ListAssociatedApprovalRuleTemplatesForRepository":{ + "name":"ListAssociatedApprovalRuleTemplatesForRepository", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAssociatedApprovalRuleTemplatesForRepositoryInput"}, + "output":{"shape":"ListAssociatedApprovalRuleTemplatesForRepositoryOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "ListBranches":{ "name":"ListBranches", "http":{ @@ -821,6 +1099,27 @@ {"shape":"InvalidContinuationTokenException"} ] }, + "ListRepositoriesForApprovalRuleTemplate":{ + "name":"ListRepositoriesForApprovalRuleTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRepositoriesForApprovalRuleTemplateInput"}, + "output":{"shape":"ListRepositoriesForApprovalRuleTemplateOutput"}, + "errors":[ + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"InvalidMaxResultsException"}, + {"shape":"InvalidContinuationTokenException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -988,6 +1287,7 @@ {"shape":"InvalidRepositoryNameException"}, {"shape":"RepositoryDoesNotExistException"}, {"shape":"ConcurrentReferenceUpdateException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1036,6 +1336,7 @@ {"shape":"InvalidRepositoryNameException"}, {"shape":"RepositoryDoesNotExistException"}, {"shape":"RepositoryNotAssociatedWithPullRequestException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1084,6 +1385,32 @@ {"shape":"RepositoryDoesNotExistException"}, {"shape":"RepositoryNotAssociatedWithPullRequestException"}, {"shape":"ConcurrentReferenceUpdateException"}, + {"shape":"PullRequestApprovalRulesNotSatisfiedException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "OverridePullRequestApprovalRules":{ + "name":"OverridePullRequestApprovalRules", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OverridePullRequestApprovalRulesInput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + 
{"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"InvalidOverrideStatusException"}, + {"shape":"OverrideStatusRequiredException"}, + {"shape":"OverrideAlreadySetException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"PullRequestAlreadyClosedException"}, {"shape":"EncryptionIntegrityChecksFailedException"}, {"shape":"EncryptionKeyAccessDeniedException"}, {"shape":"EncryptionKeyDisabledException"}, @@ -1329,6 +1656,53 @@ {"shape":"TagPolicyException"} ] }, + "UpdateApprovalRuleTemplateContent":{ + "name":"UpdateApprovalRuleTemplateContent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateContentInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateContentOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"InvalidApprovalRuleTemplateContentException"}, + {"shape":"InvalidRuleContentSha256Exception"}, + {"shape":"ApprovalRuleTemplateContentRequiredException"} + ] + }, + "UpdateApprovalRuleTemplateDescription":{ + "name":"UpdateApprovalRuleTemplateDescription", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateDescriptionInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateDescriptionOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"InvalidApprovalRuleTemplateDescriptionException"} + ] + }, + "UpdateApprovalRuleTemplateName":{ + "name":"UpdateApprovalRuleTemplateName", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateApprovalRuleTemplateNameInput"}, + "output":{"shape":"UpdateApprovalRuleTemplateNameOutput"}, + "errors":[ + {"shape":"InvalidApprovalRuleTemplateNameException"}, + {"shape":"ApprovalRuleTemplateNameRequiredException"}, + {"shape":"ApprovalRuleTemplateDoesNotExistException"}, + {"shape":"ApprovalRuleTemplateNameAlreadyExistsException"} + ] + }, "UpdateComment":{ "name":"UpdateComment", "http":{ @@ -1368,6 +1742,59 @@ {"shape":"EncryptionKeyUnavailableException"} ] }, + "UpdatePullRequestApprovalRuleContent":{ + "name":"UpdatePullRequestApprovalRuleContent", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePullRequestApprovalRuleContentInput"}, + "output":{"shape":"UpdatePullRequestApprovalRuleContentOutput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"ApprovalRuleNameRequiredException"}, + {"shape":"InvalidApprovalRuleNameException"}, + {"shape":"ApprovalRuleDoesNotExistException"}, + {"shape":"InvalidRuleContentSha256Exception"}, + {"shape":"ApprovalRuleContentRequiredException"}, + {"shape":"InvalidApprovalRuleContentException"}, + {"shape":"CannotModifyApprovalRuleFromTemplateException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, + "UpdatePullRequestApprovalState":{ + "name":"UpdatePullRequestApprovalState", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdatePullRequestApprovalStateInput"}, + "errors":[ + {"shape":"PullRequestDoesNotExistException"}, + {"shape":"InvalidPullRequestIdException"}, + {"shape":"PullRequestIdRequiredException"}, + {"shape":"InvalidRevisionIdException"}, + {"shape":"RevisionIdRequiredException"}, + {"shape":"InvalidApprovalStateException"}, + {"shape":"ApprovalStateRequiredException"}, + {"shape":"PullRequestCannotBeApprovedByAuthorException"}, + {"shape":"RevisionNotCurrentException"}, + {"shape":"PullRequestAlreadyClosedException"}, + {"shape":"MaximumNumberOfApprovalsExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"} + ] + }, "UpdatePullRequestDescription":{ "name":"UpdatePullRequestDescription", "http":{ @@ -1466,13 +1893,228 @@ "exception":true }, "AdditionalData":{"type":"string"}, + "Approval":{ + "type":"structure", + "members":{ + "userArn":{"shape":"Arn"}, + "approvalState":{"shape":"ApprovalState"} + } + }, + "ApprovalList":{ + "type":"list", + "member":{"shape":"Approval"} + }, + "ApprovalRule":{ + "type":"structure", + "members":{ + "approvalRuleId":{"shape":"ApprovalRuleId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"}, + "ruleContentSha256":{"shape":"RuleContentSha256"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "lastModifiedUser":{"shape":"Arn"}, + "originApprovalRuleTemplate":{"shape":"OriginApprovalRuleTemplate"} + } + }, + "ApprovalRuleContent":{ + "type":"string", + "max":3000, + "min":1 + }, + "ApprovalRuleContentRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleEventMetadata":{ + "type":"structure", + "members":{ + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleId":{"shape":"ApprovalRuleId"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "ApprovalRuleId":{"type":"string"}, + "ApprovalRuleName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApprovalRuleNameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleOverriddenEventMetadata":{ + "type":"structure", + "members":{ + "revisionId":{"shape":"RevisionId"}, + "overrideStatus":{"shape":"OverrideStatus"} + } + }, + "ApprovalRuleTemplate":{ + "type":"structure", + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"}, + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"}, + "approvalRuleTemplateContent":{"shape":"ApprovalRuleTemplateContent"}, + "ruleContentSha256":{"shape":"RuleContentSha256"}, + "lastModifiedDate":{"shape":"LastModifiedDate"}, + "creationDate":{"shape":"CreationDate"}, + "lastModifiedUser":{"shape":"Arn"} + } + }, + "ApprovalRuleTemplateContent":{ + "type":"string", + "max":3000, + "min":1 + }, + "ApprovalRuleTemplateContentRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateDescription":{ + "type":"string", + 
"max":1000, + "min":0 + }, + "ApprovalRuleTemplateDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateId":{"type":"string"}, + "ApprovalRuleTemplateInUseException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateName":{ + "type":"string", + "max":100, + "min":1 + }, + "ApprovalRuleTemplateNameAlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRuleTemplateNameList":{ + "type":"list", + "member":{"shape":"ApprovalRuleTemplateName"} + }, + "ApprovalRuleTemplateNameRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "ApprovalRulesList":{ + "type":"list", + "member":{"shape":"ApprovalRule"} + }, + "ApprovalRulesNotSatisfiedList":{ + "type":"list", + "member":{"shape":"ApprovalRuleName"} + }, + "ApprovalRulesSatisfiedList":{ + "type":"list", + "member":{"shape":"ApprovalRuleName"} + }, + "ApprovalState":{ + "type":"string", + "enum":[ + "APPROVE", + "REVOKE" + ] + }, + "ApprovalStateChangedEventMetadata":{ + "type":"structure", + "members":{ + "revisionId":{"shape":"RevisionId"}, + "approvalStatus":{"shape":"ApprovalState"} + } + }, + "ApprovalStateRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "Approved":{"type":"boolean"}, "Arn":{"type":"string"}, + "AssociateApprovalRuleTemplateWithRepositoryInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryName" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, "AuthorDoesNotExistException":{ "type":"structure", "members":{ }, "exception":true }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesError":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "errorCode":{"shape":"ErrorCode"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList":{ + "type":"list", + "member":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesError"} + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryNames" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput":{ + "type":"structure", + "required":[ + "associatedRepositoryNames", + "errors" + ], + "members":{ + "associatedRepositoryNames":{"shape":"RepositoryNameList"}, + "errors":{"shape":"BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList"} + } + }, "BatchDescribeMergeConflictsError":{ "type":"structure", "required":[ @@ -1527,6 +2169,40 @@ "baseCommitId":{"shape":"ObjectId"} } }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError":{ + "type":"structure", + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "errorCode":{"shape":"ErrorCode"}, + "errorMessage":{"shape":"ErrorMessage"} + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList":{ + "type":"list", + "member":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesError"} + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryNames" + ], + "members":{ + 
"approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryNames":{"shape":"RepositoryNameList"} + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput":{ + "type":"structure", + "required":[ + "disassociatedRepositoryNames", + "errors" + ], + "members":{ + "disassociatedRepositoryNames":{"shape":"RepositoryNameList"}, + "errors":{"shape":"BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList"} + } + }, "BatchGetCommitsError":{ "type":"structure", "members":{ @@ -1637,6 +2313,18 @@ }, "exception":true }, + "CannotDeleteApprovalRuleFromTemplateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "CannotModifyApprovalRuleFromTemplateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "CapitalBoolean":{"type":"boolean"}, "ChangeTypeEnum":{ "type":"string", @@ -1867,6 +2555,25 @@ "member":{"shape":"Conflict"} }, "Content":{"type":"string"}, + "CreateApprovalRuleTemplateInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "approvalRuleTemplateContent" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateContent":{"shape":"ApprovalRuleTemplateContent"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"} + } + }, + "CreateApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "CreateBranchInput":{ "type":"structure", "required":[ @@ -1909,6 +2616,26 @@ "filesDeleted":{"shape":"FilesMetadata"} } }, + "CreatePullRequestApprovalRuleInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName", + "approvalRuleContent" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "approvalRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "CreatePullRequestApprovalRuleOutput":{ + "type":"structure", + "required":["approvalRule"], + "members":{ + "approvalRule":{"shape":"ApprovalRule"} + } + }, "CreatePullRequestInput":{ "type":"structure", "required":[ @@ -1984,6 +2711,20 @@ }, "exception":true }, + "DeleteApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "DeleteApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplateId"], + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"} + } + }, "DeleteBranchInput":{ "type":"structure", "required":[ @@ -2059,6 +2800,24 @@ "filePath":{"shape":"Path"} } }, + "DeletePullRequestApprovalRuleInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"} + } + }, + "DeletePullRequestApprovalRuleOutput":{ + "type":"structure", + "required":["approvalRuleId"], + "members":{ + "approvalRuleId":{"shape":"ApprovalRuleId"} + } + }, "DeleteRepositoryInput":{ "type":"structure", "required":["repositoryName"], @@ -2151,6 +2910,17 @@ }, "exception":true }, + "DisassociateApprovalRuleTemplateFromRepositoryInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "repositoryName" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "repositoryName":{"shape":"RepositoryName"} + } + }, 
"Email":{"type":"string"}, "EncryptionIntegrityChecksFailedException":{ "type":"structure", @@ -2185,6 +2955,33 @@ }, "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, + "EvaluatePullRequestApprovalRulesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "EvaluatePullRequestApprovalRulesOutput":{ + "type":"structure", + "required":["evaluation"], + "members":{ + "evaluation":{"shape":"Evaluation"} + } + }, + "Evaluation":{ + "type":"structure", + "members":{ + "approved":{"shape":"Approved"}, + "overridden":{"shape":"Overridden"}, + "approvalRulesSatisfied":{"shape":"ApprovalRulesSatisfiedList"}, + "approvalRulesNotSatisfied":{"shape":"ApprovalRulesNotSatisfiedList"} + } + }, "EventDate":{"type":"timestamp"}, "ExceptionName":{"type":"string"}, "File":{ @@ -2323,6 +3120,20 @@ "type":"list", "member":{"shape":"Folder"} }, + "GetApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "GetApprovalRuleTemplateOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "GetBlobInput":{ "type":"structure", "required":[ @@ -2597,6 +3408,23 @@ "baseCommitId":{"shape":"ObjectId"} } }, + "GetPullRequestApprovalStatesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "GetPullRequestApprovalStatesOutput":{ + "type":"structure", + "members":{ + "approvals":{"shape":"ApprovalList"} + } + }, "GetPullRequestInput":{ "type":"structure", "required":["pullRequestId"], @@ -2611,6 +3439,24 @@ "pullRequest":{"shape":"PullRequest"} } }, + "GetPullRequestOverrideStateInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"} + } + }, + "GetPullRequestOverrideStateOutput":{ + "type":"structure", + "members":{ + "overridden":{"shape":"Overridden"}, + "overrider":{"shape":"Arn"} + } + }, "GetRepositoryInput":{ "type":"structure", "required":["repositoryName"], @@ -2651,6 +3497,42 @@ }, "exception":true }, + "InvalidApprovalRuleContentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateContentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateDescriptionException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalRuleTemplateNameException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "InvalidApprovalStateException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidAuthorArnException":{ "type":"structure", "members":{ @@ -2789,6 +3671,12 @@ }, "exception":true }, + "InvalidOverrideStatusException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidParentCommitIdException":{ "type":"structure", "members":{ @@ -2903,6 +3791,18 @@ }, "exception":true }, + "InvalidRevisionIdException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + 
"InvalidRuleContentSha256Exception":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "InvalidSortByException":{ "type":"structure", "members":{ @@ -2980,6 +3880,36 @@ "box":true }, "LineNumber":{"type":"integer"}, + "ListApprovalRuleTemplatesInput":{ + "type":"structure", + "members":{ + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListApprovalRuleTemplatesOutput":{ + "type":"structure", + "members":{ + "approvalRuleTemplateNames":{"shape":"ApprovalRuleTemplateNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryInput":{ + "type":"structure", + "required":["repositoryName"], + "members":{ + "repositoryName":{"shape":"RepositoryName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput":{ + "type":"structure", + "members":{ + "approvalRuleTemplateNames":{"shape":"ApprovalRuleTemplateNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListBranchesInput":{ "type":"structure", "required":["repositoryName"], @@ -3014,6 +3944,22 @@ "nextToken":{"shape":"NextToken"} } }, + "ListRepositoriesForApprovalRuleTemplateInput":{ + "type":"structure", + "required":["approvalRuleTemplateName"], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListRepositoriesForApprovalRuleTemplateOutput":{ + "type":"structure", + "members":{ + "repositoryNames":{"shape":"RepositoryNameList"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListRepositoriesInput":{ "type":"structure", "members":{ @@ -3089,6 +4035,12 @@ }, "exception":true }, + "MaximumNumberOfApprovalsExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaximumOpenPullRequestsExceededException":{ "type":"structure", "members":{ @@ -3107,6 +4059,12 @@ }, "exception":true }, + "MaximumRuleTemplatesAssociatedWithRepositoryException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MergeBranchesByFastForwardInput":{ "type":"structure", "required":[ @@ -3336,6 +4294,18 @@ "exception":true }, "NumberOfConflicts":{"type":"integer"}, + "NumberOfRuleTemplatesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "NumberOfRulesExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "ObjectId":{"type":"string"}, "ObjectSize":{"type":"long"}, "ObjectTypeEnum":{ @@ -3362,6 +4332,46 @@ "descending" ] }, + "OriginApprovalRuleTemplate":{ + "type":"structure", + "members":{ + "approvalRuleTemplateId":{"shape":"ApprovalRuleTemplateId"}, + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "Overridden":{"type":"boolean"}, + "OverrideAlreadySetException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OverridePullRequestApprovalRulesInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId", + "overrideStatus" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"}, + "overrideStatus":{"shape":"OverrideStatus"} + } + }, + "OverrideStatus":{ + "type":"string", + "enum":[ + "OVERRIDE", + "REVOKE" + ] + }, + "OverrideStatusRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "ParentCommitDoesNotExistException":{ "type":"structure", "members":{ @@ -3496,7 +4506,9 @@ 
"pullRequestStatus":{"shape":"PullRequestStatusEnum"}, "authorArn":{"shape":"Arn"}, "pullRequestTargets":{"shape":"PullRequestTargetList"}, - "clientRequestToken":{"shape":"ClientRequestToken"} + "clientRequestToken":{"shape":"ClientRequestToken"}, + "revisionId":{"shape":"RevisionId"}, + "approvalRules":{"shape":"ApprovalRulesList"} } }, "PullRequestAlreadyClosedException":{ @@ -3505,6 +4517,18 @@ }, "exception":true }, + "PullRequestApprovalRulesNotSatisfiedException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "PullRequestCannotBeApprovedByAuthorException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "PullRequestCreatedEventMetadata":{ "type":"structure", "members":{ @@ -3530,7 +4554,10 @@ "pullRequestCreatedEventMetadata":{"shape":"PullRequestCreatedEventMetadata"}, "pullRequestStatusChangedEventMetadata":{"shape":"PullRequestStatusChangedEventMetadata"}, "pullRequestSourceReferenceUpdatedEventMetadata":{"shape":"PullRequestSourceReferenceUpdatedEventMetadata"}, - "pullRequestMergedStateChangedEventMetadata":{"shape":"PullRequestMergedStateChangedEventMetadata"} + "pullRequestMergedStateChangedEventMetadata":{"shape":"PullRequestMergedStateChangedEventMetadata"}, + "approvalRuleEventMetadata":{"shape":"ApprovalRuleEventMetadata"}, + "approvalStateChangedEventMetadata":{"shape":"ApprovalStateChangedEventMetadata"}, + "approvalRuleOverriddenEventMetadata":{"shape":"ApprovalRuleOverriddenEventMetadata"} } }, "PullRequestEventList":{ @@ -3543,7 +4570,12 @@ "PULL_REQUEST_CREATED", "PULL_REQUEST_STATUS_CHANGED", "PULL_REQUEST_SOURCE_REFERENCE_UPDATED", - "PULL_REQUEST_MERGE_STATE_CHANGED" + "PULL_REQUEST_MERGE_STATE_CHANGED", + "PULL_REQUEST_APPROVAL_RULE_CREATED", + "PULL_REQUEST_APPROVAL_RULE_UPDATED", + "PULL_REQUEST_APPROVAL_RULE_DELETED", + "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN", + "PULL_REQUEST_APPROVAL_STATE_CHANGED" ] }, "PullRequestId":{"type":"string"}, @@ -3922,6 +4954,20 @@ }, "exception":true }, + "RevisionId":{"type":"string"}, + "RevisionIdRequiredException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RevisionNotCurrentException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "RuleContentSha256":{"type":"string"}, "SameFileContentException":{ "type":"structure", "members":{ @@ -4134,6 +5180,61 @@ "tagKeys":{"shape":"TagKeysList"} } }, + "UpdateApprovalRuleTemplateContentInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "newRuleContent" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "newRuleContent":{"shape":"ApprovalRuleTemplateContent"}, + "existingRuleContentSha256":{"shape":"RuleContentSha256"} + } + }, + "UpdateApprovalRuleTemplateContentOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, + "UpdateApprovalRuleTemplateDescriptionInput":{ + "type":"structure", + "required":[ + "approvalRuleTemplateName", + "approvalRuleTemplateDescription" + ], + "members":{ + "approvalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "approvalRuleTemplateDescription":{"shape":"ApprovalRuleTemplateDescription"} + } + }, + "UpdateApprovalRuleTemplateDescriptionOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, + "UpdateApprovalRuleTemplateNameInput":{ + "type":"structure", + "required":[ + "oldApprovalRuleTemplateName", + 
"newApprovalRuleTemplateName" + ], + "members":{ + "oldApprovalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"}, + "newApprovalRuleTemplateName":{"shape":"ApprovalRuleTemplateName"} + } + }, + "UpdateApprovalRuleTemplateNameOutput":{ + "type":"structure", + "required":["approvalRuleTemplate"], + "members":{ + "approvalRuleTemplate":{"shape":"ApprovalRuleTemplate"} + } + }, "UpdateCommentInput":{ "type":"structure", "required":[ @@ -4162,6 +5263,40 @@ "defaultBranchName":{"shape":"BranchName"} } }, + "UpdatePullRequestApprovalRuleContentInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "approvalRuleName", + "newRuleContent" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "approvalRuleName":{"shape":"ApprovalRuleName"}, + "existingRuleContentSha256":{"shape":"RuleContentSha256"}, + "newRuleContent":{"shape":"ApprovalRuleContent"} + } + }, + "UpdatePullRequestApprovalRuleContentOutput":{ + "type":"structure", + "required":["approvalRule"], + "members":{ + "approvalRule":{"shape":"ApprovalRule"} + } + }, + "UpdatePullRequestApprovalStateInput":{ + "type":"structure", + "required":[ + "pullRequestId", + "revisionId", + "approvalState" + ], + "members":{ + "pullRequestId":{"shape":"PullRequestId"}, + "revisionId":{"shape":"RevisionId"}, + "approvalState":{"shape":"ApprovalState"} + } + }, "UpdatePullRequestDescriptionInput":{ "type":"structure", "required":[ diff --git a/models/apis/codecommit/2015-04-13/docs-2.json b/models/apis/codecommit/2015-04-13/docs-2.json index 16dedd9bad7..04af554911e 100644 --- a/models/apis/codecommit/2015-04-13/docs-2.json +++ b/models/apis/codecommit/2015-04-13/docs-2.json @@ -1,61 +1,82 @@ { "version": "2.0", - "service": "AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Commits, by calling the following:

Merges, by calling the following:

Pull requests, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

", + "service": "AWS CodeCommit

This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.

You can use the AWS CodeCommit API to work with the following objects:

Repositories, by calling the following:

Branches, by calling the following:

Files, by calling the following:

Commits, by calling the following:

Merges, by calling the following:

Pull requests, by calling the following:

Approval rule templates, by calling the following:

Comments in a repository, by calling the following:

Tags used to tag resources in AWS CodeCommit (not Git tags), by calling the following:

Triggers, by calling the following:

For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.

", "operations": { + "AssociateApprovalRuleTemplateWithRepository": "

Creates an association between an approval rule template and a specified repository. Then, the next time a pull request is created in the repository where the destination reference (if specified) matches the destination reference (branch) for the pull request, an approval rule that matches the template conditions is automatically created for that pull request. If no destination references are specified in the template, an approval rule that matches the template contents is created for all pull requests in that repository.

", + "BatchAssociateApprovalRuleTemplateWithRepositories": "

Creates an association between an approval rule template and one or more specified repositories.

", "BatchDescribeMergeConflicts": "

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.

", + "BatchDisassociateApprovalRuleTemplateFromRepositories": "

Removes the association between an approval rule template and one or more specified repositories.

", "BatchGetCommits": "

Returns information about the contents of one or more commits in a repository.

", - "BatchGetRepositories": "

Returns information about one or more repositories.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

", - "CreateBranch": "

Creates a new branch in a repository and points the branch to a commit.

Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.

", + "BatchGetRepositories": "

Returns information about one or more repositories.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.

", + "CreateApprovalRuleTemplate": "

Creates a template for approval rules that can then be associated with one or more repositories in your AWS account. When you associate a template with a repository, AWS CodeCommit creates an approval rule that matches the conditions of the template for all pull requests that meet the conditions of the template. For more information, see AssociateApprovalRuleTemplateWithRepository.

", + "CreateBranch": "

Creates a branch in a repository and points the branch to a commit.

Calling the create branch operation does not set a repository's default branch. To do this, call the update default branch operation.

", "CreateCommit": "

Creates a commit for a repository on the tip of a specified branch.

", "CreatePullRequest": "

Creates a pull request in the specified repository.

", + "CreatePullRequestApprovalRule": "

Creates an approval rule for a pull request.

", "CreateRepository": "

Creates a new, empty repository.

", - "CreateUnreferencedMergeCommit": "

Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy, as that strategy does not create a merge commit.

This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.

", + "CreateUnreferencedMergeCommit": "

Creates an unreferenced commit that represents the result of merging two branches using a specified merge strategy. This can help you determine the outcome of a potential merge. This API cannot be used with the fast-forward merge strategy because that strategy does not create a merge commit.

This unreferenced merge commit can only be accessed using the GetCommit API or through git commands such as git fetch. To retrieve this commit, you must specify its commit ID or otherwise reference it.

", + "DeleteApprovalRuleTemplate": "

Deletes a specified approval rule template. Deleting a template does not remove approval rules on pull requests already created with the template.

", "DeleteBranch": "

Deletes a branch from a repository, unless that branch is the default branch for the repository.

", "DeleteCommentContent": "

Deletes the content of a comment made on a change, file, or commit in a repository.

", - "DeleteFile": "

Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file will still exist in the commits prior to the commit that contains the deletion.

", - "DeleteRepository": "

Deletes a repository. If a specified repository was already deleted, a null repository ID will be returned.

Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository will fail.

", - "DescribeMergeConflicts": "

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception will be thrown.

", + "DeleteFile": "

Deletes a specified file from a specified branch. A commit is created on the branch that contains the revision. The file still exists in commits earlier than the commit that contains the deletion.

", + "DeletePullRequestApprovalRule": "

Deletes an approval rule from a specified pull request. Approval rules can be deleted from a pull request only if the pull request is open, and if the approval rule was created specifically for a pull request and not generated from an approval rule template associated with the repository where the pull request was created. You cannot delete an approval rule from a merged or closed pull request.

", + "DeleteRepository": "

Deletes a repository. If a specified repository was already deleted, a null repository ID is returned.

Deleting a repository also deletes all associated objects and metadata. After a repository is deleted, all future push calls to the deleted repository fail.

", + "DescribeMergeConflicts": "

Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy. If the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, an exception is thrown.

", "DescribePullRequestEvents": "

Returns information about one or more pull request events.

", - "GetBlob": "

Returns the base-64 encoded content of an individual blob within a repository.

", + "DisassociateApprovalRuleTemplateFromRepository": "

Removes the association between a template and a repository so that approval rules based on the template are not automatically created when pull requests are created in the specified repository. This does not delete any approval rules previously created for pull requests through the template association.

", + "EvaluatePullRequestApprovalRules": "

Evaluates whether a pull request has met all the conditions specified in its associated approval rules.

", + "GetApprovalRuleTemplate": "

Returns information about a specified approval rule template.

", + "GetBlob": "

Returns the base-64 encoded content of an individual blob in a repository.

", "GetBranch": "

Returns information about a repository branch, including its name and the last commit ID.

", "GetComment": "

Returns the content of a comment made on a change, file, or commit in a repository.

", "GetCommentsForComparedCommit": "

Returns information about comments made on the comparison between two commits.

", "GetCommentsForPullRequest": "

Returns comments made on a pull request.

", "GetCommit": "

Returns information about a commit, including commit message and committer information.

", - "GetDifferences": "

Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference). Results can be limited to a specified path.

", + "GetDifferences": "

Returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other fully qualified reference). Results can be limited to a specified path.

", "GetFile": "

Returns the base-64 encoded contents of a specified file and its metadata.

", "GetFolder": "

Returns the contents of a specified folder in a repository.

", "GetMergeCommit": "

Returns information about a specified merge commit.

", "GetMergeConflicts": "

Returns information about merge conflicts between the before and after commit IDs for a pull request in a repository.

", - "GetMergeOptions": "

Returns information about the merge options available for merging two specified branches. For details about why a particular merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.

", + "GetMergeOptions": "

Returns information about the merge options available for merging two specified branches. For details about why a merge option is not available, use GetMergeConflicts or DescribeMergeConflicts.

", "GetPullRequest": "

Gets information about a pull request in a specified repository.

", - "GetRepository": "

Returns information about a repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

", + "GetPullRequestApprovalStates": "

Gets information about the approval states for a specified pull request. Approval states only apply to pull requests that have one or more approval rules applied to them.

", + "GetPullRequestOverrideState": "

Returns information about whether approval rules have been set aside (overridden) for a pull request, and if so, the Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request.

", + "GetRepository": "

Returns information about a repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.

", "GetRepositoryTriggers": "

Gets information about triggers configured for a repository.

", + "ListApprovalRuleTemplates": "

Lists all approval rule templates in the specified AWS Region in your AWS account. If an AWS Region is not specified, the AWS Region where you are signed in is used.

", + "ListAssociatedApprovalRuleTemplatesForRepository": "

Lists all approval rule templates that are associated with a specified repository.

", "ListBranches": "

Gets information about one or more branches in a repository.

", "ListPullRequests": "

Returns a list of pull requests for a specified repository. The return list can be refined by pull request status or pull request author ARN.

", "ListRepositories": "

Gets information about one or more repositories.

", - "ListTagsForResource": "

Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", + "ListRepositoriesForApprovalRuleTemplate": "

Lists all repositories associated with the specified approval rule template.

", + "ListTagsForResource": "

Gets information about AWS tags for a specified Amazon Resource Name (ARN) in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", "MergeBranchesByFastForward": "

Merges two branches using the fast-forward merge strategy.

", "MergeBranchesBySquash": "

Merges two branches using the squash merge strategy.

", "MergeBranchesByThreeWay": "

Merges two specified branches using the three-way merge strategy.

", "MergePullRequestByFastForward": "

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the fast-forward merge strategy. If the merge is successful, it closes the pull request.

", "MergePullRequestBySquash": "

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the squash merge strategy. If the merge is successful, it closes the pull request.

", "MergePullRequestByThreeWay": "

Attempts to merge the source commit of a pull request into the specified destination branch for that pull request at the specified commit using the three-way merge strategy. If the merge is successful, it closes the pull request.

", + "OverridePullRequestApprovalRules": "

Sets aside (overrides) all approval rule requirements for a specified pull request.

", "PostCommentForComparedCommit": "

Posts a comment on the comparison between two commits.

", "PostCommentForPullRequest": "

Posts a comment on a pull request.

", "PostCommentReply": "

Posts a comment in reply to an existing comment on a comparison between commits or a pull request.

", "PutFile": "

Adds or updates a file in a branch in an AWS CodeCommit repository, and generates a commit for the addition in the specified branch.

", - "PutRepositoryTriggers": "

Replaces all triggers for a repository. This can be used to create or delete triggers.

", - "TagResource": "

Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", - "TestRepositoryTriggers": "

Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test will send data from the last commit. If no data is available, sample data will be generated.

", - "UntagResource": "

Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", + "PutRepositoryTriggers": "

Replaces all triggers for a repository. Used to create or delete triggers.

", + "TagResource": "

Adds or updates tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", + "TestRepositoryTriggers": "

Tests the functionality of repository triggers by sending information to the trigger target. If real data is available in the repository, the test sends data from the last commit. If no data is available, sample data is generated.

", + "UntagResource": "

Removes tags for a resource in AWS CodeCommit. For a list of valid resources in AWS CodeCommit, see CodeCommit Resources and Operations in the AWS CodeCommit User Guide.

", + "UpdateApprovalRuleTemplateContent": "

Updates the content of an approval rule template. You can change the number of required approvals, the membership of the approval rule, and whether an approval pool is defined.

", + "UpdateApprovalRuleTemplateDescription": "

Updates the description for a specified approval rule template.

", + "UpdateApprovalRuleTemplateName": "

Updates the name of a specified approval rule template.

", "UpdateComment": "

Replaces the contents of a comment.

", "UpdateDefaultBranch": "

Sets or changes the default branch name for the specified repository.

If you use this operation to change the default branch name to the current default branch name, a success message is returned even though the default branch did not change.

", + "UpdatePullRequestApprovalRuleContent": "

Updates the structure of an approval rule created specifically for a pull request. For example, you can change the number of required approvers and the approval pool for approvers.

", + "UpdatePullRequestApprovalState": "

Updates the state of a user's approval on a pull request. The user is derived from the signed-in account when the request is made.

", "UpdatePullRequestDescription": "

Replaces the contents of the description of a pull request.

", "UpdatePullRequestStatus": "

Updates the status of a pull request.

", "UpdatePullRequestTitle": "

Replaces the title of a pull request.

", - "UpdateRepositoryDescription": "

Sets or changes the comment or description for a repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

", - "UpdateRepositoryName": "

Renames a repository. The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix \".git\" is prohibited. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide.

" + "UpdateRepositoryDescription": "

Sets or changes the comment or description for a repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.

", + "UpdateRepositoryName": "

Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide.

" }, "shapes": { "AccountId": { @@ -72,20 +93,239 @@ "AdditionalData": { "base": null, "refs": { - "Commit$additionalData": "

Any additional data associated with the specified commit.

" + "Commit$additionalData": "

Any other data associated with the specified commit.

" + } + }, + "Approval": { + "base": "

Returns information about a specific approval on a pull request.

", + "refs": { + "ApprovalList$member": null + } + }, + "ApprovalList": { + "base": null, + "refs": { + "GetPullRequestApprovalStatesOutput$approvals": "

Information about users who have approved the pull request.

" + } + }, + "ApprovalRule": { + "base": "

Returns information about an approval rule.

", + "refs": { + "ApprovalRulesList$member": null, + "CreatePullRequestApprovalRuleOutput$approvalRule": "

Information about the created approval rule.

", + "UpdatePullRequestApprovalRuleContentOutput$approvalRule": "

Information about the updated approval rule.

" + } + }, + "ApprovalRuleContent": { + "base": null, + "refs": { + "ApprovalRule$approvalRuleContent": "

The content of the approval rule.

", + "ApprovalRuleEventMetadata$approvalRuleContent": "

The content of the approval rule.

", + "CreatePullRequestApprovalRuleInput$approvalRuleContent": "

The content of the approval rule, including the number of approvals needed and the structure of an approval pool defined for approvals, if any. For more information about approval pools, see the AWS CodeCommit User Guide.

When you create the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

", + "UpdatePullRequestApprovalRuleContentInput$newRuleContent": "

The updated content for the approval rule.

When you update the content of the approval rule, you can specify approvers in an approval pool in one of two ways:

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

" + } + }, + "ApprovalRuleContentRequiredException": { + "base": "

The content for the approval rule is empty. You must provide some content for an approval rule. The content cannot be null.

", + "refs": { + } + }, + "ApprovalRuleDoesNotExistException": { + "base": "

The specified approval rule does not exist.

", + "refs": { + } + }, + "ApprovalRuleEventMetadata": { + "base": "

Returns information about an event for an approval rule.

", + "refs": { + "PullRequestEvent$approvalRuleEventMetadata": "

Information about a pull request event.

" + } + }, + "ApprovalRuleId": { + "base": null, + "refs": { + "ApprovalRule$approvalRuleId": "

The system-generated ID of the approval rule.

", + "ApprovalRuleEventMetadata$approvalRuleId": "

The system-generated ID of the approval rule.

", + "DeletePullRequestApprovalRuleOutput$approvalRuleId": "

The ID of the deleted approval rule.

If the approval rule was deleted in an earlier API call, the response is 200 OK without content.

" + } + }, + "ApprovalRuleName": { + "base": null, + "refs": { + "ApprovalRule$approvalRuleName": "

The name of the approval rule.

", + "ApprovalRuleEventMetadata$approvalRuleName": "

The name of the approval rule.

", + "ApprovalRulesNotSatisfiedList$member": null, + "ApprovalRulesSatisfiedList$member": null, + "CreatePullRequestApprovalRuleInput$approvalRuleName": "

The name for the approval rule.

", + "DeletePullRequestApprovalRuleInput$approvalRuleName": "

The name of the approval rule you want to delete.

", + "UpdatePullRequestApprovalRuleContentInput$approvalRuleName": "

The name of the approval rule you want to update.

" + } + }, + "ApprovalRuleNameAlreadyExistsException": { + "base": "

An approval rule with that name already exists. Approval rule names must be unique within the scope of a pull request.

", + "refs": { + } + }, + "ApprovalRuleNameRequiredException": { + "base": "

An approval rule name is required, but was not specified.

", + "refs": { + } + }, + "ApprovalRuleOverriddenEventMetadata": { + "base": "

Returns information about an override event for approval rules for a pull request.

", + "refs": { + "PullRequestEvent$approvalRuleOverriddenEventMetadata": "

Information about an approval rule override event for a pull request.

" + } + }, + "ApprovalRuleTemplate": { + "base": "

Returns information about an approval rule template.

", + "refs": { + "CreateApprovalRuleTemplateOutput$approvalRuleTemplate": "

The content and structure of the created approval rule template.

", + "GetApprovalRuleTemplateOutput$approvalRuleTemplate": "

The content and structure of the approval rule template.

", + "UpdateApprovalRuleTemplateContentOutput$approvalRuleTemplate": null, + "UpdateApprovalRuleTemplateDescriptionOutput$approvalRuleTemplate": "

The structure and content of the updated approval rule template.

", + "UpdateApprovalRuleTemplateNameOutput$approvalRuleTemplate": "

The structure and content of the updated approval rule template.

" + } + }, + "ApprovalRuleTemplateContent": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateContent": "

The content of the approval rule template.

", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateContent": "

The content of the approval rule that is created on pull requests in associated repositories. If you specify one or more destination references (branches), approval rules are created in an associated repository only if their destination references (branches) match those specified in the template.

When you create the content of the approval rule template, you can specify approvers in an approval pool in one of two ways:

For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers in the IAM User Guide.

", + "UpdateApprovalRuleTemplateContentInput$newRuleContent": "

The content that replaces the existing content of the rule. Content statements must be complete. You cannot provide only the changes.

" + } + }, + "ApprovalRuleTemplateContentRequiredException": { + "base": "

The content for the approval rule template is empty. You must provide some content for an approval rule template. The content cannot be null.

", + "refs": { + } + }, + "ApprovalRuleTemplateDescription": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateDescription": "

The description of the approval rule template.

", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateDescription": "

The description of the approval rule template. Consider providing a description that explains what this template does and when it might be appropriate to associate it with repositories.

", + "UpdateApprovalRuleTemplateDescriptionInput$approvalRuleTemplateDescription": "

The updated description of the approval rule template.

" + } + }, + "ApprovalRuleTemplateDoesNotExistException": { + "base": "

The specified approval rule template does not exist. Verify that the name is correct and that you are signed in to the AWS Region where the template was created, and then try again.

", + "refs": { + } + }, + "ApprovalRuleTemplateId": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateId": "

The system-generated ID of the approval rule template.

", + "DeleteApprovalRuleTemplateOutput$approvalRuleTemplateId": "

The system-generated ID of the deleted approval rule template. If the template has been previously deleted, the only response is a 200 OK.

", + "OriginApprovalRuleTemplate$approvalRuleTemplateId": "

The ID of the template that created the approval rule.

" + } + }, + "ApprovalRuleTemplateInUseException": { + "base": "

The approval rule template is associated with one or more repositories. You cannot delete a template that is associated with a repository. Remove all associations, and then try again.

", + "refs": { + } + }, + "ApprovalRuleTemplateName": { + "base": null, + "refs": { + "ApprovalRuleTemplate$approvalRuleTemplateName": "

The name of the approval rule template.

", + "ApprovalRuleTemplateNameList$member": null, + "AssociateApprovalRuleTemplateWithRepositoryInput$approvalRuleTemplateName": "

The name for the approval rule template.

", + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput$approvalRuleTemplateName": "

The name of the template you want to associate with one or more repositories.

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput$approvalRuleTemplateName": "

The name of the template that you want to disassociate from one or more repositories.

", + "CreateApprovalRuleTemplateInput$approvalRuleTemplateName": "

The name of the approval rule template. Provide descriptive names, because this name is applied to the approval rules created automatically in associated repositories.

", + "DeleteApprovalRuleTemplateInput$approvalRuleTemplateName": "

The name of the approval rule template to delete.

", + "DisassociateApprovalRuleTemplateFromRepositoryInput$approvalRuleTemplateName": "

The name of the approval rule template to disassociate from a specified repository.

", + "GetApprovalRuleTemplateInput$approvalRuleTemplateName": "

The name of the approval rule template for which you want to get information.

", + "ListRepositoriesForApprovalRuleTemplateInput$approvalRuleTemplateName": "

The name of the approval rule template for which you want to list repositories that are associated with that template.

", + "OriginApprovalRuleTemplate$approvalRuleTemplateName": "

The name of the template that created the approval rule.

", + "UpdateApprovalRuleTemplateContentInput$approvalRuleTemplateName": "

The name of the approval rule template where you want to update the content of the rule.

", + "UpdateApprovalRuleTemplateDescriptionInput$approvalRuleTemplateName": "

The name of the template for which you want to update the description.

", + "UpdateApprovalRuleTemplateNameInput$oldApprovalRuleTemplateName": "

The current name of the approval rule template.

", + "UpdateApprovalRuleTemplateNameInput$newApprovalRuleTemplateName": "

The new name you want to apply to the approval rule template.

" + } + }, + "ApprovalRuleTemplateNameAlreadyExistsException": { + "base": "

You cannot create an approval rule template with that name because a template with that name already exists in this AWS Region for your AWS account. Approval rule template names must be unique.

", + "refs": { + } + }, + "ApprovalRuleTemplateNameList": { + "base": null, + "refs": { + "ListApprovalRuleTemplatesOutput$approvalRuleTemplateNames": "

The names of all the approval rule templates found in the AWS Region for your AWS account.

", + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput$approvalRuleTemplateNames": "

The names of all approval rule templates associated with the repository.

" + } + }, + "ApprovalRuleTemplateNameRequiredException": { + "base": "

An approval rule template name is required, but was not specified.

", + "refs": { + } + }, + "ApprovalRulesList": { + "base": null, + "refs": { + "PullRequest$approvalRules": "

The approval rules applied to the pull request.

" + } + }, + "ApprovalRulesNotSatisfiedList": { + "base": null, + "refs": { + "Evaluation$approvalRulesNotSatisfied": "

The names of the approval rules that have not had their conditions met.

" + } + }, + "ApprovalRulesSatisfiedList": { + "base": null, + "refs": { + "Evaluation$approvalRulesSatisfied": "

The names of the approval rules that have had their conditions met.

" + } + }, + "ApprovalState": { + "base": null, + "refs": { + "Approval$approvalState": "

The state of the approval, APPROVE or REVOKE. REVOKE states are not stored.

", + "ApprovalStateChangedEventMetadata$approvalStatus": "

The approval status for the pull request.

", + "UpdatePullRequestApprovalStateInput$approvalState": "

The approval state to associate with the user on the pull request.

" + } + }, + "ApprovalStateChangedEventMetadata": { + "base": "

Returns information about a change in the approval state for a pull request.

", + "refs": { + "PullRequestEvent$approvalStateChangedEventMetadata": "

Information about an approval state change for a pull request.

" + } + }, + "ApprovalStateRequiredException": { + "base": "

An approval state is required, but was not specified.

", + "refs": { + } + }, + "Approved": { + "base": null, + "refs": { + "Evaluation$approved": "

Whether the state of the pull request is approved.

" } }, "Arn": { "base": null, "refs": { + "Approval$userArn": "

The Amazon Resource Name (ARN) of the user.

", + "ApprovalRule$lastModifiedUser": "

The Amazon Resource Name (ARN) of the user who made the most recent changes to the approval rule.

", + "ApprovalRuleTemplate$lastModifiedUser": "

The Amazon Resource Name (ARN) of the user who made the most recent changes to the approval rule template.

", "Comment$authorArn": "

The Amazon Resource Name (ARN) of the person who posted the comment.

", - "DescribePullRequestEventsInput$actorArn": "

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.

", + "DescribePullRequestEventsInput$actorArn": "

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with more commits or changing the status of a pull request.

", + "GetPullRequestOverrideStateOutput$overrider": "

The Amazon Resource Name (ARN) of the user or identity that overrode the rules and their requirements for the pull request.

", "ListPullRequestsInput$authorArn": "

Optional. The Amazon Resource Name (ARN) of the user who created the pull request. If used, this filters the results to pull requests created by that user.

", "MergeMetadata$mergedBy": "

The Amazon Resource Name (ARN) of the user who merged the branches.

", "PullRequest$authorArn": "

The Amazon Resource Name (ARN) of the user who created the pull request.

", - "PullRequestEvent$actorArn": "

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with additional commits or changing the status of a pull request.

", + "PullRequestEvent$actorArn": "

The Amazon Resource Name (ARN) of the user whose actions resulted in the event. Examples include updating the pull request with more commits or changing the status of a pull request.

", "RepositoryMetadata$Arn": "

The Amazon Resource Name (ARN) of the repository.

", - "RepositoryTrigger$destinationArn": "

The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon SNS.

" + "RepositoryTrigger$destinationArn": "

The ARN of the resource that is the target for a trigger (for example, the ARN of a topic in Amazon SNS).

" + } + }, + "AssociateApprovalRuleTemplateWithRepositoryInput": { + "base": null, + "refs": { } }, "AuthorDoesNotExistException": { @@ -93,8 +333,30 @@ "refs": { } }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesError": { + "base": "

Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories operation.

", + "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList$member": null + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList": { + "base": null, + "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput$errors": "

A list of any errors that might have occurred while attempting to create the association between the template and the repositories.

" + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput": { + "base": null, + "refs": { + } + }, + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput": { + "base": null, + "refs": { + } + }, "BatchDescribeMergeConflictsError": { - "base": "

Information about errors in a BatchDescribeMergeConflicts operation.

", + "base": "

Returns information about errors in a BatchDescribeMergeConflicts operation.

", "refs": { "BatchDescribeMergeConflictsErrors$member": null } @@ -115,6 +377,28 @@ "refs": { } }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError": { + "base": "

Returns information about errors in a BatchDisassociateApprovalRuleTemplateFromRepositories operation.

", + "refs": { + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList$member": null + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList": { + "base": null, + "refs": { + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput$errors": "

A list of any errors that might have occurred while attempting to remove the association between the template and the repositories.

" + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput": { + "base": null, + "refs": { + } + }, + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput": { + "base": null, + "refs": { + } + }, "BatchGetCommitsError": { "base": "

Returns information about errors in a BatchGetCommits operation.

", "refs": { @@ -124,7 +408,7 @@ "BatchGetCommitsErrorsList": { "base": null, "refs": { - "BatchGetCommitsOutput$errors": "

Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA or that commit was not found in the specified repository, the ID will return an error object with additional information.

" + "BatchGetCommitsOutput$errors": "

Returns any commit IDs for which information could not be found. For example, if one of the commit IDs was a shortened SHA ID or that commit was not found in the specified repository, the ID returns an error object with more information.

" } }, "BatchGetCommitsInput": { @@ -158,7 +442,7 @@ } }, "BlobIdRequiredException": { - "base": "

A blob ID is required but was not specified.

", + "base": "

A blob ID is required, but was not specified.

", "refs": { } }, @@ -187,14 +471,14 @@ "BranchInfo$branchName": "

The name of the branch.

", "BranchNameList$member": null, "CreateBranchInput$branchName": "

The name of the new branch to create.

", - "CreateCommitInput$branchName": "

The name of the branch where you will create the commit.

", + "CreateCommitInput$branchName": "

The name of the branch where you create the commit.

", "DeleteBranchInput$branchName": "

The name of the branch to delete.

", - "DeleteFileInput$branchName": "

The name of the branch where the commit will be made deleting the file.

", + "DeleteFileInput$branchName": "

The name of the branch where the commit that deletes the file is made.

", "GetBranchInput$branchName": "

The name of the branch for which you want to retrieve information.

", - "MergeBranchesByFastForwardInput$targetBranch": "

The branch where the merge will be applied.

", - "MergeBranchesBySquashInput$targetBranch": "

The branch where the merge will be applied.

", - "MergeBranchesByThreeWayInput$targetBranch": "

The branch where the merge will be applied.

", - "PutFileInput$branchName": "

The name of the branch where you want to add or update the file. If this is an empty repository, this branch will be created.

", + "MergeBranchesByFastForwardInput$targetBranch": "

The branch where the merge is applied.

", + "MergeBranchesBySquashInput$targetBranch": "

The branch where the merge is applied.

", + "MergeBranchesByThreeWayInput$targetBranch": "

The branch where the merge is applied.

", + "PutFileInput$branchName": "

The name of the branch where you want to add or update the file. If this is an empty repository, this branch is created.

", "RepositoryMetadata$defaultBranch": "

The repository's default branch name.

", "UpdateDefaultBranchInput$defaultBranchName": "

The name of the branch to set as the default.

" } @@ -205,7 +489,7 @@ } }, "BranchNameIsTagNameException": { - "base": "

The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.

", + "base": "

The specified branch name is not valid because it is a tag name. Enter the name of a branch in the repository. For a list of valid branch names, use ListBranches.

", "refs": { } }, @@ -213,11 +497,21 @@ "base": null, "refs": { "ListBranchesOutput$branches": "

The list of branch names.

", - "RepositoryTrigger$branches": "

The branches that will be included in the trigger configuration. If you specify an empty array, the trigger will apply to all branches.

Although no content is required in the array, you must include the array itself.

" + "RepositoryTrigger$branches": "

The branches to be included in the trigger configuration. If you specify an empty array, the trigger applies to all branches.

Although no content is required in the array, you must include the array itself.

" } }, "BranchNameRequiredException": { - "base": "

A branch name is required but was not specified.

", + "base": "

A branch name is required, but was not specified.

", + "refs": { + } + }, + "CannotDeleteApprovalRuleFromTemplateException": { + "base": "

The approval rule cannot be deleted from the pull request because it was created by an approval rule template and applied to the pull request automatically.

", + "refs": { + } + }, + "CannotModifyApprovalRuleFromTemplateException": { + "base": "

The approval rule cannot be modified for the pull request because it was created by an approval rule template and applied to the pull request automatically.

", "refs": { } }, @@ -233,23 +527,23 @@ "base": null, "refs": { "Difference$changeType": "

Whether the change type of the difference is an addition (A), deletion (D), or modification (M).

", - "MergeOperations$source": "

The operation on a file (add, modify, or delete) of a file in the source of a merge or pull request.

", + "MergeOperations$source": "

The operation (add, modify, or delete) on a file in the source of a merge or pull request.

", "MergeOperations$destination": "

The operation on a file in the destination of a merge or pull request.

" } }, "ClientRequestToken": { "base": null, "refs": { - "Comment$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

", - "CreatePullRequestInput$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you do not have to generate an idempotency token, as this will be done for you.

", - "PostCommentForComparedCommitInput$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

", - "PostCommentForPullRequestInput$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

", - "PostCommentReplyInput$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

", - "PullRequest$clientRequestToken": "

A unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

" + "Comment$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

", + "CreatePullRequestInput$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, an idempotency token is created for you.

", + "PostCommentForComparedCommitInput$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

", + "PostCommentForPullRequestInput$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

", + "PostCommentReplyInput$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

", + "PullRequest$clientRequestToken": "

A unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

" } }, "ClientRequestTokenRequiredException": { - "base": "

A client request token is required. A client request token is an unique, client-generated idempotency token that when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request will return information about the initial request that used that token.

", + "base": "

A client request token is required. A client request token is a unique, client-generated idempotency token that, when provided in a request, ensures the request cannot be repeated with a changed parameter. If a request is received with the same parameters and a token is included, the request returns information about the initial request that used that token.

", "refs": { } }, @@ -293,7 +587,7 @@ } }, "CommentDoesNotExistException": { - "base": "

No comment exists with the provided ID. Verify that you have provided the correct ID, and then try again.

", + "base": "

No comment exists with the provided ID. Verify that you have used the correct ID, and then try again.

", "refs": { } }, @@ -365,26 +659,26 @@ "base": null, "refs": { "BranchInfo$commitId": "

The ID of the last commit made to the branch.

", - "CommentsForComparedCommit$beforeCommitId": "

The full commit ID of the commit used to establish the 'before' of the comparison.

", - "CommentsForComparedCommit$afterCommitId": "

The full commit ID of the commit used to establish the 'after' of the comparison.

", - "CommentsForPullRequest$beforeCommitId": "

The full commit ID of the commit that was the tip of the destination branch when the pull request was created. This commit will be superceded by the after commit in the source branch when and if you merge the source branch into the destination branch.

", - "CommentsForPullRequest$afterCommitId": "

he full commit ID of the commit that was the tip of the source branch at the time the comment was made.

", + "CommentsForComparedCommit$beforeCommitId": "

The full commit ID of the commit used to establish the before of the comparison.

", + "CommentsForComparedCommit$afterCommitId": "

The full commit ID of the commit used to establish the after of the comparison.

", + "CommentsForPullRequest$beforeCommitId": "

The full commit ID of the commit that was the tip of the destination branch when the pull request was created. This commit is superseded by the after commit in the source branch when and if you merge the source branch into the destination branch.

", + "CommentsForPullRequest$afterCommitId": "

The full commit ID of the commit that was the tip of the source branch at the time the comment was made.

", "CreateBranchInput$commitId": "

The ID of the commit to point the new branch to.

", - "CreateCommitInput$parentCommitId": "

The ID of the commit that is the parent of the commit you will create. If this is an empty repository, this is not required.

", - "DeleteFileInput$parentCommitId": "

The ID of the commit that is the tip of the branch where you want to create the commit that will delete the file. This must be the HEAD commit for the branch. The commit that deletes the file will be created from this commit ID.

", - "GetCommentsForComparedCommitInput$beforeCommitId": "

To establish the directionality of the comparison, the full commit ID of the 'before' commit.

", - "GetCommentsForComparedCommitInput$afterCommitId": "

To establish the directionality of the comparison, the full commit ID of the 'after' commit.

", + "CreateCommitInput$parentCommitId": "

The ID of the commit that is the parent of the commit you create. Not required if this is an empty repository.

", + "DeleteFileInput$parentCommitId": "

The ID of the commit that is the tip of the branch where you want to create the commit that deletes the file. This must be the HEAD commit for the branch. The commit that deletes the file is created from this commit ID.

", + "GetCommentsForComparedCommitInput$beforeCommitId": "

To establish the directionality of the comparison, the full commit ID of the before commit.

", + "GetCommentsForComparedCommitInput$afterCommitId": "

To establish the directionality of the comparison, the full commit ID of the after commit.

", "GetCommentsForPullRequestInput$beforeCommitId": "

The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.

", "GetCommentsForPullRequestInput$afterCommitId": "

The full commit ID of the commit in the source branch that was the tip of the branch at the time the comment was made.

", "MergeMetadata$mergeCommitId": "

The commit ID for the merge commit, if any.

", - "PostCommentForComparedCommitInput$beforeCommitId": "

To establish the directionality of the comparison, the full commit ID of the 'before' commit.

This is required for commenting on any commit unless that commit is the initial commit.

", - "PostCommentForComparedCommitInput$afterCommitId": "

To establish the directionality of the comparison, the full commit ID of the 'after' commit.

", - "PostCommentForComparedCommitOutput$beforeCommitId": "

In the directionality you established, the full commit ID of the 'before' commit.

", - "PostCommentForComparedCommitOutput$afterCommitId": "

In the directionality you established, the full commit ID of the 'after' commit.

", + "PostCommentForComparedCommitInput$beforeCommitId": "

To establish the directionality of the comparison, the full commit ID of the before commit. Required for commenting on any commit unless that commit is the initial commit.

", + "PostCommentForComparedCommitInput$afterCommitId": "

To establish the directionality of the comparison, the full commit ID of the after commit.

", + "PostCommentForComparedCommitOutput$beforeCommitId": "

In the directionality you established, the full commit ID of the before commit.

", + "PostCommentForComparedCommitOutput$afterCommitId": "

In the directionality you established, the full commit ID of the after commit.

", "PostCommentForPullRequestInput$beforeCommitId": "

The full commit ID of the commit in the destination branch that was the tip of the branch at the time the pull request was created.

", "PostCommentForPullRequestInput$afterCommitId": "

The full commit ID of the commit in the source branch that is the current tip of the branch for the pull request when you post the comment.

", "PostCommentForPullRequestOutput$beforeCommitId": "

The full commit ID of the commit in the source branch used to create the pull request, or in the case of an updated pull request, the full commit ID of the commit used to update the pull request.

", - "PostCommentForPullRequestOutput$afterCommitId": "

The full commit ID of the commit in the destination branch where the pull request will be merged.

", + "PostCommentForPullRequestOutput$afterCommitId": "

The full commit ID of the commit in the destination branch where the pull request is merged.

", "PullRequestCreatedEventMetadata$sourceCommitId": "

The commit ID on the source branch used when the pull request was created.

", "PullRequestCreatedEventMetadata$destinationCommitId": "

The commit ID of the tip of the branch specified as the destination branch when the pull request was created.

", "PullRequestCreatedEventMetadata$mergeBase": "

The commit ID of the most recent commit that the source branch and the destination branch have in common.

", @@ -392,9 +686,9 @@ "PullRequestSourceReferenceUpdatedEventMetadata$afterCommitId": "

The full commit ID of the commit in the source branch that was the tip of the branch at the time the pull request was updated.

", "PullRequestSourceReferenceUpdatedEventMetadata$mergeBase": "

The commit ID of the most recent commit that the source branch and the destination branch have in common.

", "PullRequestTarget$destinationCommit": "

The full commit ID that is the tip of the destination branch. This is the commit where the pull request was or will be merged.

", - "PullRequestTarget$sourceCommit": "

The full commit ID of the tip of the source branch used to create the pull request. If the pull request branch is updated by a push while the pull request is open, the commit ID will change to reflect the new tip of the branch.

", + "PullRequestTarget$sourceCommit": "

The full commit ID of the tip of the source branch used to create the pull request. If the pull request branch is updated by a push while the pull request is open, the commit ID changes to reflect the new tip of the branch.

", "PullRequestTarget$mergeBase": "

The commit ID of the most recent commit that the source branch and the destination branch have in common.

", - "PutFileInput$parentCommitId": "

The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.

The commit ID must match the ID of the head commit at the time of the operation, or an error will occur, and the file will not be added or updated.

" + "PutFileInput$parentCommitId": "

The full commit ID of the head commit in the branch where you want to add or update the file. If this is an empty repository, no commit ID is required. If this is not an empty repository, a commit ID is required.

The commit ID must match the ID of the head commit at the time of the operation. Otherwise, an error occurs, and the file is not added or updated.

" } }, "CommitIdDoesNotExistException": { @@ -410,7 +704,7 @@ "CommitIdsInputList": { "base": null, "refs": { - "BatchGetCommitsInput$commitIds": "

The full commit IDs of the commits to get information about.

You must supply the full SHAs of each commit. You cannot use shortened SHAs.

" + "BatchGetCommitsInput$commitIds": "

The full commit IDs of the commits to get information about.

You must supply the full SHA IDs of each commit. You cannot use shortened SHA IDs.

" } }, "CommitIdsLimitExceededException": { @@ -419,7 +713,7 @@ } }, "CommitIdsListRequiredException": { - "base": null, + "base": "

A list of commit IDs is required, but was either not specified or the list was empty.

", "refs": { } }, @@ -431,28 +725,28 @@ "CommitName": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "BatchDescribeMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "CreateUnreferencedMergeCommitInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "CreateUnreferencedMergeCommitInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "DescribeMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "DescribeMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetDifferencesInput$beforeCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, the full commit ID. Optional. If not specified, all changes prior to the afterCommitSpecifier value will be shown. If you do not use beforeCommitSpecifier in your request, consider limiting the results with maxResults.

", + "BatchDescribeMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "BatchDescribeMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "CreateUnreferencedMergeCommitInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "CreateUnreferencedMergeCommitInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "DescribeMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "DescribeMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetDifferencesInput$beforeCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, the full commit ID). Optional. If not specified, all changes before the afterCommitSpecifier value are shown. If you do not use beforeCommitSpecifier in your request, consider limiting the results with maxResults.

", "GetDifferencesInput$afterCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit.

", - "GetFileInput$commitSpecifier": "

The fully-quaified reference that identifies the commit that contains the file. For example, you could specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, then the head commit will be used.

", - "GetFolderInput$commitSpecifier": "

A fully-qualified reference used to identify a commit that contains the version of the folder's content to return. A fully-qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. If no specifier is provided, the folder content will be returned as it exists in the HEAD commit.

", - "GetMergeCommitInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetMergeCommitInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetMergeOptionsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "GetMergeOptionsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesByFastForwardInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesByFastForwardInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesBySquashInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesBySquashInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesByThreeWayInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

", - "MergeBranchesByThreeWayInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit. For example, a branch name or a full commit ID.

" + "GetFileInput$commitSpecifier": "

The fully qualified reference that identifies the commit that contains the file. For example, you can specify a full commit ID, a tag, a branch name, or a reference such as refs/heads/master. If none is provided, the head commit is used.

", + "GetFolderInput$commitSpecifier": "

A fully qualified reference used to identify a commit that contains the version of the folder's content to return. A fully qualified reference can be a commit ID, branch name, tag, or reference such as HEAD. If no specifier is provided, the folder content is returned as it exists in the HEAD commit.

", + "GetMergeCommitInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetMergeCommitInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetMergeConflictsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetMergeConflictsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetMergeOptionsInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "GetMergeOptionsInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesByFastForwardInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesByFastForwardInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesBySquashInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesBySquashInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesByThreeWayInput$sourceCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

", + "MergeBranchesByThreeWayInput$destinationCommitSpecifier": "

The branch, tag, HEAD, or other fully qualified reference used to identify a commit (for example, a branch name or a full commit ID).

" } }, "CommitObjectsList": { @@ -480,16 +774,16 @@ "ConflictDetailLevelTypeEnum": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "CreateUnreferencedMergeCommitInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "DescribeMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "GetMergeCommitInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "GetMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "GetMergeOptionsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "MergeBranchesBySquashInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "MergeBranchesByThreeWayInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "MergePullRequestBySquashInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

", - "MergePullRequestByThreeWayInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which will return a not mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict will be considered not mergeable if the same file in both branches has differences on the same line.

" + "BatchDescribeMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "CreateUnreferencedMergeCommitInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "DescribeMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "GetMergeCommitInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "GetMergeConflictsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "GetMergeOptionsInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "MergeBranchesBySquashInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "MergeBranchesByThreeWayInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "MergePullRequestBySquashInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

", + "MergePullRequestByThreeWayInput$conflictDetailLevel": "

The level of conflict detail to use. If unspecified, the default FILE_LEVEL is used, which returns a not-mergeable result if the same file has differences in both branches. If LINE_LEVEL is specified, a conflict is considered not mergeable if the same file in both branches has differences on the same line.

" } }, "ConflictMetadata": { @@ -503,32 +797,32 @@ "ConflictMetadataList": { "base": null, "refs": { - "GetMergeConflictsOutput$conflictMetadataList": "

A list of metadata for any conflicting files. If the specified merge strategy is FAST_FORWARD_MERGE, this list will always be empty.

" + "GetMergeConflictsOutput$conflictMetadataList": "

A list of metadata for any conflicting files. If the specified merge strategy is FAST_FORWARD_MERGE, this list is always empty.

" } }, "ConflictResolution": { - "base": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

", + "base": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

", "refs": { - "CreateUnreferencedMergeCommitInput$conflictResolution": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

", - "MergeBranchesBySquashInput$conflictResolution": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

", - "MergeBranchesByThreeWayInput$conflictResolution": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

", - "MergePullRequestBySquashInput$conflictResolution": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

", - "MergePullRequestByThreeWayInput$conflictResolution": "

A list of inputs to use when resolving conflicts during a merge if AUTOMERGE is chosen as the conflict resolution strategy.

" + "CreateUnreferencedMergeCommitInput$conflictResolution": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

", + "MergeBranchesBySquashInput$conflictResolution": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

", + "MergeBranchesByThreeWayInput$conflictResolution": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

", + "MergePullRequestBySquashInput$conflictResolution": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

", + "MergePullRequestByThreeWayInput$conflictResolution": "

If AUTOMERGE is the conflict resolution strategy, a list of inputs to use when resolving conflicts during a merge.

" } }, "ConflictResolutionStrategyTypeEnum": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "CreateUnreferencedMergeCommitInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "DescribeMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "GetMergeCommitInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "GetMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "GetMergeOptionsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "MergeBranchesBySquashInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "MergeBranchesByThreeWayInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "MergePullRequestBySquashInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

", - "MergePullRequestByThreeWayInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation will be successful.

" + "BatchDescribeMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "CreateUnreferencedMergeCommitInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "DescribeMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "GetMergeCommitInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "GetMergeConflictsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "GetMergeOptionsInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "MergeBranchesBySquashInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "MergeBranchesByThreeWayInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "MergePullRequestBySquashInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

", + "MergePullRequestByThreeWayInput$conflictResolutionStrategy": "

Specifies which branch to use when resolving conflicts, or whether to attempt automatically merging two versions of a file. The default is NONE, which requires any conflicts to be resolved manually before the merge operation is successful.

" } }, "Conflicts": { @@ -544,7 +838,17 @@ "PostCommentForComparedCommitInput$content": "

The content of the comment you want to make.

", "PostCommentForPullRequestInput$content": "

The content of your comment on the change.

", "PostCommentReplyInput$content": "

The contents of your reply to a comment.

", - "UpdateCommentInput$content": "

The updated content with which you want to replace the existing content of the comment.

" + "UpdateCommentInput$content": "

The updated content to replace the existing content of the comment.

" + } + }, + "CreateApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "CreateApprovalRuleTemplateOutput": { + "base": null, + "refs": { } }, "CreateBranchInput": { @@ -562,6 +866,16 @@ "refs": { } }, + "CreatePullRequestApprovalRuleInput": { + "base": null, + "refs": { + } + }, + "CreatePullRequestApprovalRuleOutput": { + "base": null, + "refs": { + } + }, "CreatePullRequestInput": { "base": null, "refs": { @@ -595,6 +909,8 @@ "CreationDate": { "base": null, "refs": { + "ApprovalRule$creationDate": "

The date the approval rule was created, in timestamp format.

", + "ApprovalRuleTemplate$creationDate": "

The date the approval rule template was created, in timestamp format.

", "Comment$creationDate": "

The date and time the comment was created, in timestamp format.

", "PullRequest$creationDate": "

The date and time the pull request was originally created, in timestamp format.

", "RepositoryMetadata$creationDate": "

The date and time the repository was created, in timestamp format.

" @@ -611,6 +927,16 @@ "refs": { } }, + "DeleteApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "DeleteApprovalRuleTemplateOutput": { + "base": null, + "refs": { + } + }, "DeleteBranchInput": { "base": "

Represents the input of a delete branch operation.

", "refs": { @@ -634,12 +960,12 @@ "DeleteFileEntries": { "base": null, "refs": { - "ConflictResolution$deleteFiles": "

Files that will be deleted as part of the merge conflict resolution.

", - "CreateCommitInput$deleteFiles": "

The files to delete in this commit. These files will still exist in prior commits.

" + "ConflictResolution$deleteFiles": "

Files to be deleted as part of the merge conflict resolution.

", + "CreateCommitInput$deleteFiles": "

The files to delete in this commit. These files still exist in earlier commits.

" } }, "DeleteFileEntry": { - "base": "

A file that will be deleted as part of a commit.

", + "base": "

A file that is deleted as part of a commit.

", "refs": { "DeleteFileEntries$member": null } @@ -654,6 +980,16 @@ "refs": { } }, + "DeletePullRequestApprovalRuleInput": { + "base": null, + "refs": { + } + }, + "DeletePullRequestApprovalRuleOutput": { + "base": null, + "refs": { + } + }, "DeleteRepositoryInput": { "base": "

Represents the input of a delete repository operation.

", "refs": { @@ -689,7 +1025,7 @@ "refs": { "CreatePullRequestInput$description": "

A description of the pull request.

", "PullRequest$description": "

The user-defined description of the pull request. This description can be used to clarify what should be reviewed and other details of the request.

", - "UpdatePullRequestDescriptionInput$description": "

The updated content of the description for the pull request. This content will replace the existing description.

" + "UpdatePullRequestDescriptionInput$description": "

The updated content of the description for the pull request. This content replaces the existing description.

" } }, "Difference": { @@ -701,7 +1037,7 @@ "DifferenceList": { "base": null, "refs": { - "GetDifferencesOutput$differences": "

A differences data type object that contains information about the differences, including whether the difference is added, modified, or deleted (A, D, M).

" + "GetDifferencesOutput$differences": "

A data type object that contains information about the differences, including whether the difference is added, modified, or deleted (A, D, M).

" } }, "DirectoryNameConflictsWithFileNameException": { @@ -709,16 +1045,21 @@ "refs": { } }, + "DisassociateApprovalRuleTemplateFromRepositoryInput": { + "base": null, + "refs": { + } + }, "Email": { "base": null, "refs": { "CreateCommitInput$email": "

The email address of the person who created the commit.

", "CreateUnreferencedMergeCommitInput$email": "

The email address for the person who created the unreferenced commit.

", - "DeleteFileInput$email": "

The email address for the commit that deletes the file. If no email address is specified, the email address will be left blank.

", - "MergeBranchesBySquashInput$email": "

The email address of the person merging the branches. This information will be used in the commit information for the merge.

", - "MergeBranchesByThreeWayInput$email": "

The email address of the person merging the branches. This information will be used in the commit information for the merge.

", - "MergePullRequestBySquashInput$email": "

The email address of the person merging the branches. This information will be used in the commit information for the merge.

", - "MergePullRequestByThreeWayInput$email": "

The email address of the person merging the branches. This information will be used in the commit information for the merge.

", + "DeleteFileInput$email": "

The email address for the commit that deletes the file. If no email address is specified, the email address is left blank.

", + "MergeBranchesBySquashInput$email": "

The email address of the person merging the branches. This information is used in the commit information for the merge.

", + "MergeBranchesByThreeWayInput$email": "

The email address of the person merging the branches. This information is used in the commit information for the merge.

", + "MergePullRequestBySquashInput$email": "

The email address of the person merging the branches. This information is used in the commit information for the merge.

", + "MergePullRequestByThreeWayInput$email": "

The email address of the person merging the branches. This information is used in the commit information for the merge.

", "PutFileInput$email": "

An email address for the person adding or updating the file.

", "UserInfo$email": "

The email address associated with the user who made the commit, if any.

" } @@ -751,15 +1092,35 @@ "ErrorCode": { "base": null, "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$errorCode": "

An error code that specifies whether the repository name was not valid or not found.

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$errorCode": "

An error code that specifies whether the repository name was not valid or not found.

", "BatchGetCommitsError$errorCode": "

An error code that specifies whether the commit ID was not valid or not found.

" } }, "ErrorMessage": { "base": null, "refs": { + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$errorMessage": "

An error message that provides details about why the repository name was not found or not valid.

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$errorMessage": "

An error message that provides details about why the repository name was either not found or not valid.

", "BatchGetCommitsError$errorMessage": "

An error message that provides detail about why the commit ID either was not found or was not valid.

" } }, + "EvaluatePullRequestApprovalRulesInput": { + "base": null, + "refs": { + } + }, + "EvaluatePullRequestApprovalRulesOutput": { + "base": null, + "refs": { + } + }, + "Evaluation": { + "base": "

Returns information about the approval rules applied to a pull request and whether conditions have been met.

", + "refs": { + "EvaluatePullRequestApprovalRulesOutput$evaluation": "

The result of the evaluation, including the names of the rules whose conditions have been met (if any), the names of the rules whose conditions have not been met (if any), whether the pull request is in the approved state, and whether the pull request approval rule has been set aside by an override.

" + } + }, "EventDate": { "base": null, "refs": { @@ -788,7 +1149,7 @@ } }, "FileContentAndSourceFileSpecifiedException": { - "base": "

The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file, or provide the file content directly.

", + "base": "

The commit cannot be created because both a source file and file content have been specified for the same file. You cannot provide both. Either specify a source file or provide the file content directly.

", "refs": { } }, @@ -798,12 +1159,12 @@ } }, "FileContentSizeLimitExceededException": { - "base": "

The file cannot be added because it is too large. The maximum file size that can be added is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

", + "base": "

The file cannot be added because it is too large. The maximum file size is 6 MB, and the combined file content change size is 7 MB. Consider making these changes using a Git client.

", "refs": { } }, "FileDoesNotExistException": { - "base": "

The specified file does not exist. Verify that you have provided the correct name of the file, including its full path and extension.

", + "base": "

The specified file does not exist. Verify that you have used the correct file name, full path, and extension.

", "refs": { } }, @@ -815,17 +1176,17 @@ "FileList": { "base": null, "refs": { - "GetFolderOutput$files": "

The list of files that exist in the specified folder, if any.

" + "GetFolderOutput$files": "

The list of files in the specified folder, if any.

" } }, "FileMetadata": { - "base": "

A file that will be added, updated, or deleted as part of a commit.

", + "base": "

A file to be added, updated, or deleted as part of a commit.

", "refs": { "FilesMetadata$member": null } }, "FileModeRequiredException": { - "base": "

The commit cannot be created because a file mode is required to update mode permissions for an existing file, but no file mode has been specified.

", + "base": "

The commit cannot be created because no file mode has been specified. A file mode is required to update mode permissions for a file.

", "refs": { } }, @@ -837,9 +1198,9 @@ "FileModes$source": "

The file mode of a file in the source of a merge or pull request.

", "FileModes$destination": "

The file mode of a file in the destination of a merge or pull request.

", "FileModes$base": "

The file mode of a file in the base of a merge or pull request.

", - "GetFileOutput$fileMode": "

The extrapolated file mode permissions of the blob. Valid values include strings such as EXECUTABLE and not numeric values.

The file mode permissions returned by this API are not the standard file mode permission values, such as 100644, but rather extrapolated values. See below for a full list of supported return values.

", + "GetFileOutput$fileMode": "

The extrapolated file mode permissions of the blob. Valid values include strings such as EXECUTABLE and not numeric values.

The file mode permissions returned by this API are not the standard file mode permission values, such as 100644, but rather extrapolated values. See the supported return values.

", "PutFileEntry$fileMode": "

The extrapolated file mode permissions for the file. Valid values include EXECUTABLE and NORMAL.

", - "PutFileInput$fileMode": "

The file mode permissions of the blob. Valid file mode permissions are listed below.

", + "PutFileInput$fileMode": "

The file mode permissions of the blob. Valid file mode permissions are listed here.

", "ReplaceContentEntry$fileMode": "

The file mode to apply during conflict resolution.

", "SetFileModeEntry$fileMode": "

The file mode for the file.

", "SymbolicLink$fileMode": "

The file mode permissions of the blob that contains information about the symbolic link.

" @@ -906,14 +1267,24 @@ } }, "FolderDoesNotExistException": { - "base": "

The specified folder does not exist. Either the folder name is not correct, or you did not provide the full path to the folder.

", + "base": "

The specified folder does not exist. Either the folder name is not correct, or you did not enter the full path to the folder.

", "refs": { } }, "FolderList": { "base": null, "refs": { - "GetFolderOutput$subFolders": "

The list of folders that exist beneath the specified folder, if any.

" + "GetFolderOutput$subFolders": "

The list of folders that exist under the specified folder, if any.

" + } + }, + "GetApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "GetApprovalRuleTemplateOutput": { + "base": null, + "refs": { } }, "GetBlobInput": { @@ -1036,6 +1407,16 @@ "refs": { } }, + "GetPullRequestApprovalStatesInput": { + "base": null, + "refs": { + } + }, + "GetPullRequestApprovalStatesOutput": { + "base": null, + "refs": { + } + }, "GetPullRequestInput": { "base": null, "refs": { @@ -1046,6 +1427,16 @@ "refs": { } }, + "GetPullRequestOverrideStateInput": { + "base": null, + "refs": { + } + }, + "GetPullRequestOverrideStateOutput": { + "base": null, + "refs": { + } + }, "GetRepositoryInput": { "base": "

Represents the input of a get repository operation.

", "refs": { @@ -1069,11 +1460,11 @@ "HunkContent": { "base": null, "refs": { - "MergeHunkDetail$hunkContent": "

The base-64 encoded content of the hunk merged region that might or might not contain a conflict.

" + "MergeHunkDetail$hunkContent": "

The base-64 encoded content of the hunk merged region that might contain a conflict.

" } }, "IdempotencyParameterMismatchException": { - "base": "

The client request token is not valid. Either the token is not in a valid format, or the token has been used in a previous request and cannot be re-used.

", + "base": "

The client request token is not valid. Either the token is not in a valid format, or the token has been used in a previous request and cannot be reused.

", "refs": { } }, @@ -1082,6 +1473,36 @@ "refs": { } }, + "InvalidApprovalRuleContentException": { + "base": "

The content for the approval rule is not valid.

", + "refs": { + } + }, + "InvalidApprovalRuleNameException": { + "base": "

The name for the approval rule is not valid.

", + "refs": { + } + }, + "InvalidApprovalRuleTemplateContentException": { + "base": "

The content of the approval rule template is not valid.

", + "refs": { + } + }, + "InvalidApprovalRuleTemplateDescriptionException": { + "base": "

The description for the approval rule template is not valid because it exceeds the maximum characters allowed for a description. For more information about limits in AWS CodeCommit, see the AWS CodeCommit User Guide.

", + "refs": { + } + }, + "InvalidApprovalRuleTemplateNameException": { + "base": "

The name of the approval rule template is not valid. Template names must be between 1 and 100 valid characters in length. For more information about limits in AWS CodeCommit, see the AWS CodeCommit User Guide.

", + "refs": { + } + }, + "InvalidApprovalStateException": { + "base": "

The state for the approval is not valid. Valid values include APPROVE and REVOKE.

", + "refs": { + } + }, "InvalidAuthorArnException": { "base": "

The Amazon Resource Name (ARN) is not valid. Make sure that you have provided the full ARN for the author of the pull request, and then try again.

", "refs": { @@ -1143,7 +1564,7 @@ } }, "InvalidDescriptionException": { - "base": "

The pull request description is not valid. Descriptions are limited to 1,000 characters in length.

", + "base": "

The pull request description is not valid. Descriptions cannot be more than 1,000 characters.

", "refs": { } }, @@ -1158,7 +1579,7 @@ } }, "InvalidFileLocationException": { - "base": "

The location of the file is not valid. Make sure that you include the extension of the file as well as the file name.

", + "base": "

The location of the file is not valid. Make sure that you include the file name and extension.

", "refs": { } }, @@ -1197,6 +1618,11 @@ "refs": { } }, + "InvalidOverrideStatusException": { + "base": "

The override status is not valid. Valid statuses are OVERRIDE and REVOKE.

", + "refs": { + } + }, "InvalidParentCommitIdException": { "base": "

The parent commit ID is not valid. The commit ID cannot be empty, and must match the head commit ID for the branch of the repository where you want to add or update a file.

", "refs": { @@ -1228,7 +1654,7 @@ } }, "InvalidReferenceNameException": { - "base": "

The specified reference name format is not valid. Reference names must conform to the Git references format, for example refs/heads/master. For more information, see Git Internals - Git References or consult your Git documentation.

", + "base": "

The specified reference name format is not valid. Reference names must conform to the Git references format (for example, refs/heads/master). For more information, see Git Internals - Git References or consult your Git documentation.

", "refs": { } }, @@ -1253,7 +1679,7 @@ } }, "InvalidRepositoryNameException": { - "base": "

At least one specified repository name is not valid.

This exception only occurs when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

", + "base": "

A specified repository name is not valid.

This exception occurs only when a specified repository name is not valid. Other exceptions occur when a required repository parameter is missing, or when a specified repository does not exist.

", "refs": { } }, @@ -1283,7 +1709,7 @@ } }, "InvalidRepositoryTriggerRegionException": { - "base": "

The region for the trigger target does not match the region for the repository. Triggers must be created in the same region as the target for the trigger.

", + "base": "

The AWS Region for the trigger target does not match the AWS Region for the repository. Triggers must be created in the same Region as the target for the trigger.

", "refs": { } }, @@ -1292,6 +1718,16 @@ "refs": { } }, + "InvalidRevisionIdException": { + "base": "

The revision ID is not valid. Use GetPullRequest to determine the value.

", + "refs": { + } + }, + "InvalidRuleContentSha256Exception": { + "base": "

The SHA-256 hash signature for the rule content is not valid.

", + "refs": { + } + }, "InvalidSortByException": { "base": "

The specified sort by value is not valid.

", "refs": { @@ -1364,7 +1800,7 @@ "IsHunkConflict": { "base": null, "refs": { - "MergeHunk$isConflict": "

A Boolean value indicating whether a combination of hunks contains a conflict. Conflicts occur when the same file or the same lines in a file were modified in both the source and destination of a merge or pull request. Valid values include true, false, and null. This will be true when the hunk represents a conflict and one or more files contains a line conflict. File mode conflicts in a merge will not set this to be true.

" + "MergeHunk$isConflict": "

A Boolean value indicating whether a combination of hunks contains a conflict. Conflicts occur when the same file or the same lines in a file were modified in both the source and destination of a merge or pull request. Valid values include true, false, and null. True when the hunk represents a conflict and one or more files contain a line conflict. File mode conflicts in a merge do not set this to true.

" } }, "IsMergeable": { @@ -1394,18 +1830,20 @@ "KeepEmptyFolders": { "base": null, "refs": { - "CreateCommitInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

", - "CreateUnreferencedMergeCommitInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

", - "DeleteFileInput$keepEmptyFolders": "

Specifies whether to delete the folder or directory that contains the file you want to delete if that file is the only object in the folder or directory. By default, empty folders will be deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 will also delete the empty folders dir4, dir3, and dir2.

", - "MergeBranchesBySquashInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

", - "MergeBranchesByThreeWayInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

", - "MergePullRequestBySquashInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

", - "MergePullRequestByThreeWayInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file will be created for empty folders. The default is false.

" + "CreateCommitInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.

", + "CreateUnreferencedMergeCommitInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file is created for empty folders. The default is false.

", + "DeleteFileInput$keepEmptyFolders": "

If a file is the only object in the folder or directory, specifies whether to delete the folder or directory that contains the file. By default, empty folders are deleted. This includes empty folders that are part of the directory structure. For example, if the path to a file is dir1/dir2/dir3/dir4, and dir2 and dir3 are empty, deleting the last file in dir4 also deletes the empty folders dir4, dir3, and dir2.

", + "MergeBranchesBySquashInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If this is specified as true, a .gitkeep file is created for empty folders. The default is false.

", + "MergeBranchesByThreeWayInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.

", + "MergePullRequestBySquashInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.

", + "MergePullRequestByThreeWayInput$keepEmptyFolders": "

If the commit contains deletions, whether to keep a folder or folder structure if the changes leave the folders empty. If true, a .gitkeep file is created for empty folders. The default is false.

" } }, "LastModifiedDate": { "base": null, "refs": { + "ApprovalRule$lastModifiedDate": "

The date the approval rule was most recently changed, in timestamp format.

", + "ApprovalRuleTemplate$lastModifiedDate": "

The date the approval rule template was most recently changed, in timestamp format.

", "Comment$lastModifiedDate": "

The date and time the comment was most recently modified, in timestamp format.

", "PullRequest$lastActivityDate": "

The day and time of the last user or system activity on the pull request, in timestamp format.

", "RepositoryMetadata$lastModifiedDate": "

The date and time the repository was last modified, in timestamp format.

" @@ -1414,7 +1852,7 @@ "Limit": { "base": null, "refs": { - "GetDifferencesInput$MaxResults": "

A non-negative integer used to limit the number of returned results.

" + "GetDifferencesInput$MaxResults": "

A non-zero, non-negative integer used to limit the number of returned results.

" } }, "LineNumber": { @@ -1424,6 +1862,26 @@ "MergeHunkDetail$endLine": "

The end position of the hunk in the merge result.

" } }, + "ListApprovalRuleTemplatesInput": { + "base": null, + "refs": { + } + }, + "ListApprovalRuleTemplatesOutput": { + "base": null, + "refs": { + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryInput": { + "base": null, + "refs": { + } + }, + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput": { + "base": null, + "refs": { + } + }, "ListBranchesInput": { "base": "

Represents the input of a list branches operation.

", "refs": { @@ -1444,6 +1902,16 @@ "refs": { } }, + "ListRepositoriesForApprovalRuleTemplateInput": { + "base": null, + "refs": { + } + }, + "ListRepositoriesForApprovalRuleTemplateOutput": { + "base": null, + "refs": { + } + }, "ListRepositoriesInput": { "base": "

Represents the input of a list repositories operation.

", "refs": { @@ -1467,11 +1935,11 @@ "Location": { "base": "

Returns information about the location of a change or comment in the comparison between two commits or a pull request.

", "refs": { - "CommentsForComparedCommit$location": "

Location information about the comment on the comparison, including the file name, line number, and whether the version of the file where the comment was made is 'BEFORE' or 'AFTER'.

", - "CommentsForPullRequest$location": "

Location information about the comment on the pull request, including the file name, line number, and whether the version of the file where the comment was made is 'BEFORE' (destination branch) or 'AFTER' (source branch).

", + "CommentsForComparedCommit$location": "

Location information about the comment on the comparison, including the file name, line number, and whether the version of the file where the comment was made is BEFORE or AFTER.

", + "CommentsForPullRequest$location": "

Location information about the comment on the pull request, including the file name, line number, and whether the version of the file where the comment was made is BEFORE (destination branch) or AFTER (source branch).

", "PostCommentForComparedCommitInput$location": "

The location of the comparison where you want to comment.

", "PostCommentForComparedCommitOutput$location": "

The location of the comment in the comparison between the two commits.

", - "PostCommentForPullRequestInput$location": "

The location of the change where you want to post your comment. If no location is provided, the comment will be posted as a general comment on the pull request difference between the before commit ID and the after commit ID.

", + "PostCommentForPullRequestInput$location": "

The location of the change where you want to post your comment. If no location is provided, the comment is posted as a general comment on the pull request difference between the before commit ID and the after commit ID.

", "PostCommentForPullRequestOutput$location": "

The location of the change where you posted your comment.

" } }, @@ -1486,11 +1954,14 @@ "BatchDescribeMergeConflictsInput$maxMergeHunks": "

The maximum number of merge hunks to include in the output.

", "BatchDescribeMergeConflictsInput$maxConflictFiles": "

The maximum number of files to include in the output.

", "DescribeMergeConflictsInput$maxMergeHunks": "

The maximum number of merge hunks to include in the output.

", - "DescribePullRequestEventsInput$maxResults": "

A non-negative integer used to limit the number of returned results. The default is 100 events, which is also the maximum number of events that can be returned in a result.

", - "GetCommentsForComparedCommitInput$maxResults": "

A non-negative integer used to limit the number of returned results. The default is 100 comments, and is configurable up to 500.

", - "GetCommentsForPullRequestInput$maxResults": "

A non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request.

", + "DescribePullRequestEventsInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results. The default is 100 events, which is also the maximum number of events that can be returned in a result.

", + "GetCommentsForComparedCommitInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results. The default is 100 comments, but you can configure up to 500.

", + "GetCommentsForPullRequestInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results. The default is 100 comments. You can return up to 500 comments with a single request.

", "GetMergeConflictsInput$maxConflictFiles": "

The maximum number of files to include in the output.

", - "ListPullRequestsInput$maxResults": "

A non-negative integer used to limit the number of returned results.

" + "ListApprovalRuleTemplatesInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results.

", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results.

", + "ListPullRequestsInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results.

", + "ListRepositoriesForApprovalRuleTemplateInput$maxResults": "

A non-zero, non-negative integer used to limit the number of returned results.

" } }, "MaximumBranchesExceededException": { @@ -1514,7 +1985,12 @@ } }, "MaximumItemsToCompareExceededException": { - "base": "

The maximum number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.

", + "base": "

The number of items to compare between the source or destination branches and the merge base has exceeded the maximum allowed.

", + "refs": { + } + }, + "MaximumNumberOfApprovalsExceededException": { + "base": "

The number of approvals required for the approval rule exceeds the maximum number allowed.

", "refs": { } }, @@ -1524,7 +2000,7 @@ } }, "MaximumRepositoryNamesExceededException": { - "base": "

The maximum number of allowed repository names was exceeded. Currently, this number is 25.

", + "base": "

The maximum number of allowed repository names was exceeded. Currently, this number is 100.

", "refs": { } }, @@ -1533,6 +2009,11 @@ "refs": { } }, + "MaximumRuleTemplatesAssociatedWithRepositoryException": { + "base": "

The maximum number of approval rule templates for a repository has been exceeded. You cannot associate more than 25 approval rule templates with a repository.

", + "refs": { + } + }, "MergeBranchesByFastForwardInput": { "base": null, "refs": { @@ -1654,14 +2135,14 @@ "refs": { "BatchDescribeMergeConflictsError$message": "

The message provided by the exception.

", "Commit$message": "

The commit message associated with the specified commit.

", - "CreateCommitInput$commitMessage": "

The commit message you want to include as part of creating the commit. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.

", + "CreateCommitInput$commitMessage": "

The commit message you want to include in the commit. Commit messages are limited to 256 KB. If no message is specified, a default message is used.

", "CreateUnreferencedMergeCommitInput$commitMessage": "

The commit message for the unreferenced commit.

", - "DeleteFileInput$commitMessage": "

The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message will be used.

", + "DeleteFileInput$commitMessage": "

The commit message you want to include as part of deleting the file. Commit messages are limited to 256 KB. If no message is specified, a default message is used.

", "MergeBranchesBySquashInput$commitMessage": "

The commit message for the merge.

", "MergeBranchesByThreeWayInput$commitMessage": "

The commit message to include in the commit information for the merge.

", "MergePullRequestBySquashInput$commitMessage": "

The commit message to include in the commit information for the merge.

", "MergePullRequestByThreeWayInput$commitMessage": "

The commit message to include in the commit information for the merge.

", - "PutFileInput$commitMessage": "

A message about why this file was added or updated. While optional, adding a message is strongly encouraged in order to provide a more useful commit history for your repository.

" + "PutFileInput$commitMessage": "

A message about why this file was added or updated. Although it is optional, a message makes the commit history for your repository more useful.

" } }, "Mode": { @@ -1683,14 +2164,14 @@ "Name": { "base": null, "refs": { - "CreateCommitInput$authorName": "

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

", - "CreateUnreferencedMergeCommitInput$authorName": "

The name of the author who created the unreferenced commit. This information will be used as both the author and committer for the commit.

", - "DeleteFileInput$name": "

The name of the author of the commit that deletes the file. If no name is specified, the user's ARN will be used as the author name and committer name.

", - "MergeBranchesBySquashInput$authorName": "

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

", - "MergeBranchesByThreeWayInput$authorName": "

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

", - "MergePullRequestBySquashInput$authorName": "

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

", - "MergePullRequestByThreeWayInput$authorName": "

The name of the author who created the commit. This information will be used as both the author and committer for the commit.

", - "PutFileInput$name": "

The name of the person adding or updating the file. While optional, adding a name is strongly encouraged in order to provide a more useful commit history for your repository.

", + "CreateCommitInput$authorName": "

The name of the author who created the commit. This information is used as both the author and committer for the commit.

", + "CreateUnreferencedMergeCommitInput$authorName": "

The name of the author who created the unreferenced commit. This information is used as both the author and committer for the commit.

", + "DeleteFileInput$name": "

The name of the author of the commit that deletes the file. If no name is specified, the user's ARN is used as the author name and committer name.

", + "MergeBranchesBySquashInput$authorName": "

The name of the author who created the commit. This information is used as both the author and committer for the commit.

", + "MergeBranchesByThreeWayInput$authorName": "

The name of the author who created the commit. This information is used as both the author and committer for the commit.

", + "MergePullRequestBySquashInput$authorName": "

The name of the author who created the commit. This information is used as both the author and committer for the commit.

", + "MergePullRequestByThreeWayInput$authorName": "

The name of the author who created the commit. This information is used as both the author and committer for the commit.

", + "PutFileInput$name": "

The name of the person adding or updating the file. Although it is optional, a name makes the commit history for your repository more useful.

", "UserInfo$name": "

The name of the user who made the specified commit.

" } }, @@ -1702,27 +2183,33 @@ "NextToken": { "base": null, "refs": { - "BatchDescribeMergeConflictsInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "BatchDescribeMergeConflictsInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "BatchDescribeMergeConflictsOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", - "DescribeMergeConflictsInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "DescribeMergeConflictsInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "DescribeMergeConflictsOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", - "DescribePullRequestEventsInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "DescribePullRequestEventsInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "DescribePullRequestEventsOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", "GetCommentsForComparedCommitInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", "GetCommentsForComparedCommitOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", - "GetCommentsForPullRequestInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "GetCommentsForPullRequestInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "GetCommentsForPullRequestOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", - "GetDifferencesInput$NextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "GetDifferencesInput$NextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "GetDifferencesOutput$NextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", - "GetMergeConflictsInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "GetMergeConflictsInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "GetMergeConflictsOutput$nextToken": "

An enumeration token that can be used in a request to return the next batch of the results.

", + "ListApprovalRuleTemplatesInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", + "ListApprovalRuleTemplatesOutput$nextToken": "

An enumeration token that allows the operation to batch the next results of the operation.

", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", + "ListAssociatedApprovalRuleTemplatesForRepositoryOutput$nextToken": "

An enumeration token that allows the operation to batch the next results of the operation.

", "ListBranchesInput$nextToken": "

An enumeration token that allows the operation to batch the results.

", "ListBranchesOutput$nextToken": "

An enumeration token that returns the batch of the results.

", - "ListPullRequestsInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", - "ListPullRequestsOutput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "ListPullRequestsInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", + "ListPullRequestsOutput$nextToken": "

An enumeration token that allows the operation to batch the next results of the operation.

", + "ListRepositoriesForApprovalRuleTemplateInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", + "ListRepositoriesForApprovalRuleTemplateOutput$nextToken": "

An enumeration token that allows the operation to batch the next results of the operation.

", "ListRepositoriesInput$nextToken": "

An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

", "ListRepositoriesOutput$nextToken": "

An enumeration token that allows the operation to batch the results of the operation. Batch sizes are 1,000 for list repository operations. When the client sends the token back to AWS CodeCommit, another page of 1,000 records is retrieved.

", - "ListTagsForResourceInput$nextToken": "

An enumeration token that when provided in a request, returns the next batch of the results.

", + "ListTagsForResourceInput$nextToken": "

An enumeration token that, when provided in a request, returns the next batch of the results.

", "ListTagsForResourceOutput$nextToken": "

An enumeration token that allows the operation to batch the next results of the operation.

" } }, @@ -1737,6 +2224,16 @@ "ConflictMetadata$numberOfConflicts": "

The number of conflicts, including both hunk conflicts and metadata conflicts.

" } }, + "NumberOfRuleTemplatesExceededException": { + "base": "

The maximum number of approval rule templates has been exceeded for this AWS Region.

", + "refs": { + } + }, + "NumberOfRulesExceededException": { + "base": "

The approval rule cannot be added. The pull request has the maximum number of approval rules associated with it.

", + "refs": { + } + }, "ObjectId": { "base": null, "refs": { @@ -1745,11 +2242,11 @@ "BatchDescribeMergeConflictsOutput$baseCommitId": "

The commit ID of the merge base.

", "BatchGetCommitsError$commitId": "

A commit ID that either could not be found or was not in a valid format.

", "BlobMetadata$blobId": "

The full ID of the blob.

", - "CommentsForComparedCommit$beforeBlobId": "

The full blob ID of the commit used to establish the 'before' of the comparison.

", - "CommentsForComparedCommit$afterBlobId": "

The full blob ID of the commit used to establish the 'after' of the comparison.

", + "CommentsForComparedCommit$beforeBlobId": "

The full blob ID of the commit used to establish the before of the comparison.

", + "CommentsForComparedCommit$afterBlobId": "

The full blob ID of the commit used to establish the after of the comparison.

", "CommentsForPullRequest$beforeBlobId": "

The full blob ID of the file on which you want to comment on the destination commit.

", "CommentsForPullRequest$afterBlobId": "

The full blob ID of the file on which you want to comment on the source commit.

", - "Commit$commitId": "

The full SHA of the specified commit.

", + "Commit$commitId": "

The full SHA ID of the specified commit.

", "Commit$treeId": "

Tree information for the specified commit.

", "CommitIdsInputList$member": null, "CreateCommitOutput$commitId": "

The full commit ID of the commit that contains your committed file changes.

", @@ -1766,15 +2263,15 @@ "FileMetadata$blobId": "

The blob ID that contains the file information.

", "Folder$treeId": "

The full SHA-1 pointer of the tree information for the commit that contains the folder.

", "GetBlobInput$blobId": "

The ID of the blob, which is its SHA-1 pointer.

", - "GetCommitInput$commitId": "

The commit ID. Commit IDs are the full SHA of the commit.

", + "GetCommitInput$commitId": "

The commit ID. Commit IDs are the full SHA ID of the commit.

", "GetFileOutput$commitId": "

The full commit ID of the commit that contains the content returned by GetFile.

", "GetFileOutput$blobId": "

The blob ID of the object that represents the file content.

", - "GetFolderOutput$commitId": "

The full commit ID used as a reference for which version of the folder content is returned.

", + "GetFolderOutput$commitId": "

The full commit ID used as a reference for the returned version of the folder content.

", "GetFolderOutput$treeId": "

The full SHA-1 pointer of the tree information for the commit that contains the folder.

", "GetMergeCommitOutput$sourceCommitId": "

The commit ID of the source commit specifier that was used in the merge evaluation.

", "GetMergeCommitOutput$destinationCommitId": "

The commit ID of the destination commit specifier that was used in the merge evaluation.

", "GetMergeCommitOutput$baseCommitId": "

The commit ID of the merge base.

", - "GetMergeCommitOutput$mergedCommitId": "

The commit ID for the merge commit created when the source branch was merged into the destination branch. If the fast-forward merge strategy was used, no merge commit exists.

", + "GetMergeCommitOutput$mergedCommitId": "

The commit ID for the merge commit created when the source branch was merged into the destination branch. If the fast-forward merge strategy was used, there is no merge commit.

", "GetMergeConflictsOutput$destinationCommitId": "

The commit ID of the destination commit specifier that was used in the merge evaluation.

", "GetMergeConflictsOutput$sourceCommitId": "

The commit ID of the source commit specifier that was used in the merge evaluation.

", "GetMergeConflictsOutput$baseCommitId": "

The commit ID of the merge base.

", @@ -1791,11 +2288,11 @@ "MergePullRequestBySquashInput$sourceCommitId": "

The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.

", "MergePullRequestByThreeWayInput$sourceCommitId": "

The full commit ID of the original or updated commit in the pull request source branch. Pass this value if you want an exception thrown if the current commit ID of the tip of the source branch does not match this commit ID.

", "ParentList$member": null, - "PostCommentForComparedCommitOutput$beforeBlobId": "

In the directionality you established, the blob ID of the 'before' blob.

", - "PostCommentForComparedCommitOutput$afterBlobId": "

In the directionality you established, the blob ID of the 'after' blob.

", - "PostCommentForPullRequestOutput$beforeBlobId": "

In the directionality of the pull request, the blob ID of the 'before' blob.

", - "PostCommentForPullRequestOutput$afterBlobId": "

In the directionality of the pull request, the blob ID of the 'after' blob.

", - "PutFileOutput$commitId": "

The full SHA of the commit that contains this file change.

", + "PostCommentForComparedCommitOutput$beforeBlobId": "

In the directionality you established, the blob ID of the before blob.

", + "PostCommentForComparedCommitOutput$afterBlobId": "

In the directionality you established, the blob ID of the after blob.

", + "PostCommentForPullRequestOutput$beforeBlobId": "

In the directionality of the pull request, the blob ID of the before blob.

", + "PostCommentForPullRequestOutput$afterBlobId": "

In the directionality of the pull request, the blob ID of the after blob.

", + "PutFileOutput$commitId": "

The full SHA ID of the commit that contains this file change.

", "PutFileOutput$blobId": "

The ID of the blob, which is its SHA-1 pointer.

", "PutFileOutput$treeId": "

The full SHA-1 pointer of the tree information for the commit that contains this file change.

", "SubModule$commitId": "

The commit ID that contains the reference to the submodule.

", @@ -1828,6 +2325,41 @@ "ListRepositoriesInput$order": "

The order in which to sort the results of a list repositories operation.

" } }, + "OriginApprovalRuleTemplate": { + "base": "

Returns information about the template that created the approval rule for a pull request.

", + "refs": { + "ApprovalRule$originApprovalRuleTemplate": "

The approval rule template used to create the rule.

" + } + }, + "Overridden": { + "base": null, + "refs": { + "Evaluation$overridden": "

Whether the approval rule requirements for the pull request have been overridden and no longer need to be met.

", + "GetPullRequestOverrideStateOutput$overridden": "

A Boolean value that indicates whether a pull request has had its rules set aside (TRUE) or whether all approval rules still apply (FALSE).

" + } + }, + "OverrideAlreadySetException": { + "base": "

The pull request has already had its approval rules set to override.

", + "refs": { + } + }, + "OverridePullRequestApprovalRulesInput": { + "base": null, + "refs": { + } + }, + "OverrideStatus": { + "base": null, + "refs": { + "ApprovalRuleOverriddenEventMetadata$overrideStatus": "

The status of the override event.

", + "OverridePullRequestApprovalRulesInput$overrideStatus": "

Whether you want to set aside approval rule requirements for the pull request (OVERRIDE) or revoke a previous override and apply approval rule requirements (REVOKE). REVOKE status is not stored.

" + } + }, + "OverrideStatusRequiredException": { + "base": "

An override status is required, but no value was provided. Valid values include OVERRIDE and REVOKE.

", + "refs": { + } + }, "ParentCommitDoesNotExistException": { "base": "

The parent commit ID is not valid because it does not exist. The specified parent commit ID does not exist in the specified branch of the repository.

", "refs": { @@ -1853,33 +2385,33 @@ "base": null, "refs": { "BatchDescribeMergeConflictsError$filePath": "

The path to the file.

", - "BlobMetadata$path": "

The path to the blob and any associated file name, if any.

", + "BlobMetadata$path": "

The path to the blob and associated file name, if any.

", "ConflictMetadata$filePath": "

The path of the file that contains conflicts.

", - "DeleteFileEntry$filePath": "

The full path of the file that will be deleted, including the name of the file.

", - "DeleteFileInput$filePath": "

The fully-qualified path to the file that will be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.

", - "DeleteFileOutput$filePath": "

The fully-qualified path to the file that will be deleted, including the full name and extension of that file.

", + "DeleteFileEntry$filePath": "

The full path of the file to be deleted, including the name of the file.

", + "DeleteFileInput$filePath": "

The fully qualified path to the file to be deleted, including the full name and extension of that file. For example, /examples/file.md is a fully qualified path to a file named file.md in a folder named examples.

", + "DeleteFileOutput$filePath": "

The fully qualified path to the file to be deleted, including the full name and extension of that file.

", "DescribeMergeConflictsInput$filePath": "

The path of the target files used to describe the conflicts.

", - "File$absolutePath": "

The fully-qualified path to the file in the repository.

", + "File$absolutePath": "

The fully qualified path to the file in the repository.

", "File$relativePath": "

The relative path of the file from the folder where the query originated.

", - "FileMetadata$absolutePath": "

The full path to the file that will be added or updated, including the name of the file.

", + "FileMetadata$absolutePath": "

The full path to the file to be added or updated, including the name of the file.

", "FilePaths$member": null, - "Folder$absolutePath": "

The fully-qualified path of the folder in the repository.

", + "Folder$absolutePath": "

The fully qualified path of the folder in the repository.

", "Folder$relativePath": "

The relative path of the specified folder from the folder where the query originated.

", - "GetDifferencesInput$beforePath": "

The file path in which to check for differences. Limits the results to this path. Can also be used to specify the previous name of a directory or folder. If beforePath and afterPath are not specified, differences will be shown for all paths.

", - "GetDifferencesInput$afterPath": "

The file path in which to check differences. Limits the results to this path. Can also be used to specify the changed name of a directory or folder, if it has changed. If not specified, differences will be shown for all paths.

", - "GetFileInput$filePath": "

The fully-qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully-qualified path to a file named file.md in a folder named examples.

", - "GetFileOutput$filePath": "

The fully qualified path to the specified file. This returns the name and extension of the file.

", - "GetFolderInput$folderPath": "

The fully-qualified path to the folder whose contents will be returned, including the folder name. For example, /examples is a fully-qualified path to a folder named examples that was created off of the root directory (/) of a repository.

", - "GetFolderOutput$folderPath": "

The fully-qualified path of the folder whose contents are returned.

", + "GetDifferencesInput$beforePath": "

The file path in which to check for differences. Limits the results to this path. Can also be used to specify the previous name of a directory or folder. If beforePath and afterPath are not specified, differences are shown for all paths.

", + "GetDifferencesInput$afterPath": "

The file path in which to check differences. Limits the results to this path. Can also be used to specify the changed name of a directory or folder, if it has changed. If not specified, differences are shown for all paths.

", + "GetFileInput$filePath": "

The fully qualified path to the file, including the full name and extension of the file. For example, /examples/file.md is the fully qualified path to a file named file.md in a folder named examples.

", + "GetFileOutput$filePath": "

The fully qualified path to the specified file. Returns the name and extension of the file.

", + "GetFolderInput$folderPath": "

The fully qualified path to the folder whose contents are returned, including the folder name. For example, /examples is a fully qualified path to a folder named examples that was created off of the root directory (/) of a repository.

", + "GetFolderOutput$folderPath": "

The fully qualified path of the folder whose contents are returned.

", "Location$filePath": "

The name of the file being compared, including its extension and subdirectory, if any.

", "PutFileEntry$filePath": "

The full path to the file in the repository, including the name of the file.

", - "PutFileInput$filePath": "

The name of the file you want to add or update, including the relative path to the file in the repository.

If the path does not currently exist in the repository, the path will be created as part of adding the file.

", + "PutFileInput$filePath": "

The name of the file you want to add or update, including the relative path to the file in the repository.

If the path does not currently exist in the repository, the path is created as part of adding the file.

", "ReplaceContentEntry$filePath": "

The path of the conflicting file.

", "SetFileModeEntry$filePath": "

The full path to the file, including the name of the file.

", "SourceFileSpecifier$filePath": "

The full path to the file, including the name of the file.

", "SubModule$absolutePath": "

The fully qualified path to the folder that contains the reference to the submodule.

", "SubModule$relativePath": "

The relative path of the submodule from the folder where the query originated.

", - "SymbolicLink$absolutePath": "

The fully-qualified path to the folder that contains the symbolic link.

", + "SymbolicLink$absolutePath": "

The fully qualified path to the folder that contains the symbolic link.

", "SymbolicLink$relativePath": "

The relative path of the symbolic link from the folder where the query originated.

" } }, @@ -1896,7 +2428,7 @@ "Position": { "base": null, "refs": { - "Location$filePosition": "

The position of a change within a compared file, in line number format.

" + "Location$filePosition": "

The position of a change in a compared file, in line number format.

" } }, "PostCommentForComparedCommitInput": { @@ -1934,7 +2466,7 @@ "refs": { "CreatePullRequestOutput$pullRequest": "

Information about the newly created pull request.

", "GetPullRequestOutput$pullRequest": "

Information about the specified pull request.

", - "MergePullRequestByFastForwardOutput$pullRequest": "

Information about the specified pull request, including information about the merge.

", + "MergePullRequestByFastForwardOutput$pullRequest": "

Information about the specified pull request, including the merge.

", "MergePullRequestBySquashOutput$pullRequest": null, "MergePullRequestByThreeWayOutput$pullRequest": null, "UpdatePullRequestDescriptionOutput$pullRequest": "

Information about the updated pull request.

", @@ -1947,6 +2479,16 @@ "refs": { } }, + "PullRequestApprovalRulesNotSatisfiedException": { + "base": "

The pull request cannot be merged because one or more approval rules applied to the pull request have conditions that have not been met.

", + "refs": { + } + }, + "PullRequestCannotBeApprovedByAuthorException": { + "base": "

The approval cannot be applied because the user approving the pull request matches the user who created the pull request. You cannot approve a pull request that you created.

", + "refs": { + } + }, "PullRequestCreatedEventMetadata": { "base": "

Metadata about the pull request that is used when comparing the pull request source with its destination.

", "refs": { @@ -1974,24 +2516,32 @@ "base": null, "refs": { "DescribePullRequestEventsInput$pullRequestEventType": "

Optional. The pull request event type about which you want to return information.

", - "PullRequestEvent$pullRequestEventType": "

The type of the pull request event, for example a status change event (PULL_REQUEST_STATUS_CHANGED) or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED).

" + "PullRequestEvent$pullRequestEventType": "

The type of the pull request event (for example, a status change event (PULL_REQUEST_STATUS_CHANGED) or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED)).

" } }, "PullRequestId": { "base": null, "refs": { "CommentsForPullRequest$pullRequestId": "

The system-generated ID of the pull request.

", + "CreatePullRequestApprovalRuleInput$pullRequestId": "

The system-generated ID of the pull request for which you want to create the approval rule.

", + "DeletePullRequestApprovalRuleInput$pullRequestId": "

The system-generated ID of the pull request that contains the approval rule you want to delete.

", "DescribePullRequestEventsInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", + "EvaluatePullRequestApprovalRulesInput$pullRequestId": "

The system-generated ID of the pull request you want to evaluate.

", "GetCommentsForPullRequestInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", + "GetPullRequestApprovalStatesInput$pullRequestId": "

The system-generated ID for the pull request.

", "GetPullRequestInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", + "GetPullRequestOverrideStateInput$pullRequestId": "

The ID of the pull request for which you want to get information about whether approval rules have been set aside (overridden).

", "MergePullRequestByFastForwardInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", "MergePullRequestBySquashInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", "MergePullRequestByThreeWayInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", + "OverridePullRequestApprovalRulesInput$pullRequestId": "

The system-generated ID of the pull request for which you want to override all approval rule requirements. To get this information, use GetPullRequest.

", "PostCommentForPullRequestInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", "PostCommentForPullRequestOutput$pullRequestId": "

The system-generated ID of the pull request.

", "PullRequest$pullRequestId": "

The system-generated ID of the pull request.

", "PullRequestEvent$pullRequestId": "

The system-generated ID of the pull request.

", "PullRequestIdList$member": null, + "UpdatePullRequestApprovalRuleContentInput$pullRequestId": "

The system-generated ID of the pull request.

", + "UpdatePullRequestApprovalStateInput$pullRequestId": "

The system-generated ID of the pull request.

", "UpdatePullRequestDescriptionInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", "UpdatePullRequestStatusInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

", "UpdatePullRequestTitleInput$pullRequestId": "

The system-generated ID of the pull request. To get this ID, use ListPullRequests.

" @@ -2032,7 +2582,7 @@ "ListPullRequestsInput$pullRequestStatus": "

Optional. The status of the pull request. If used, this refines the results to the pull requests that match the specified status.

", "PullRequest$pullRequestStatus": "

The status of the pull request. Pull request status can only change from OPEN to CLOSED.

", "PullRequestStatusChangedEventMetadata$pullRequestStatus": "

The changed status of the pull request.

", - "UpdatePullRequestStatusInput$pullRequestStatus": "

The status of the pull request. The only valid operations are to update the status from OPEN to OPEN, OPEN to CLOSED or from from CLOSED to CLOSED.

" + "UpdatePullRequestStatusInput$pullRequestStatus": "

The status of the pull request. The only valid operations are to update the status from OPEN to OPEN, OPEN to CLOSED or from CLOSED to CLOSED.

" } }, "PullRequestStatusRequiredException": { @@ -2059,7 +2609,7 @@ } }, "PutFileEntry": { - "base": "

Information about a file that will be added or updated as part of a commit.

", + "base": "

Information about a file added or updated as part of a commit.

", "refs": { "PutFileEntries$member": null } @@ -2080,7 +2630,7 @@ } }, "PutRepositoryTriggersInput": { - "base": "

Represents the input ofa put repository triggers operation.

", + "base": "

Represents the input of a put repository triggers operation.

", "refs": { } }, @@ -2097,11 +2647,11 @@ "ReferenceName": { "base": null, "refs": { - "PullRequestMergedStateChangedEventMetadata$destinationReference": "

The name of the branch that the pull request will be merged into.

", + "PullRequestMergedStateChangedEventMetadata$destinationReference": "

The name of the branch that the pull request is merged into.

", "PullRequestTarget$sourceReference": "

The branch of the repository that contains the changes for the pull request. Also known as the source branch.

", - "PullRequestTarget$destinationReference": "

The branch of the repository where the pull request changes will be merged into. Also known as the destination branch.

", + "PullRequestTarget$destinationReference": "

The branch of the repository where the pull request changes are merged. Also known as the destination branch.

", "Target$sourceReference": "

The branch of the repository that contains the changes for the pull request. Also known as the source branch.

", - "Target$destinationReference": "

The branch of the repository where the pull request changes will be merged into. Also known as the destination branch.

" + "Target$destinationReference": "

The branch of the repository where the pull request changes are merged. Also known as the destination branch.

" } }, "ReferenceNameRequiredException": { @@ -2117,13 +2667,13 @@ "RelativeFileVersionEnum": { "base": null, "refs": { - "Location$relativeFileVersion": "

In a comparison of commits or a pull request, whether the change is in the 'before' or 'after' of that comparison.

" + "Location$relativeFileVersion": "

In a comparison of commits or a pull request, whether the change is in the before or after of that comparison.

" } }, "ReplaceContentEntries": { "base": null, "refs": { - "ConflictResolution$replaceContents": "

Files that will have content replaced as part of the merge conflict resolution.

" + "ConflictResolution$replaceContents": "

Files to have content replaced as part of the merge conflict resolution.

" } }, "ReplaceContentEntry": { @@ -2133,7 +2683,7 @@ } }, "ReplacementContentRequiredException": { - "base": "

USE_NEW_CONTENT was specified but no replacement content has been provided.

", + "base": "

USE_NEW_CONTENT was specified, but no replacement content has been provided.

", "refs": { } }, @@ -2151,7 +2701,7 @@ "RepositoryDescription": { "base": null, "refs": { - "CreateRepositoryInput$repositoryDescription": "

A comment or description about the new repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a web page could expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a web page.

", + "CreateRepositoryInput$repositoryDescription": "

A comment or description about the new repository.

The description field for a repository accepts all HTML characters and all valid Unicode characters. Applications that do not HTML-encode the description and display it in a webpage can expose users to potentially malicious code. Make sure that you HTML-encode the description field in any application that uses this API to display the repository description on a webpage.

", "RepositoryMetadata$repositoryDescription": "

A comment or description about the repository.

", "UpdateRepositoryDescriptionInput$repositoryDescription": "

The new comment or description for the specified repository. Repository descriptions are limited to 1,000 characters.

" } @@ -2191,18 +2741,22 @@ "RepositoryName": { "base": null, "refs": { + "AssociateApprovalRuleTemplateWithRepositoryInput$repositoryName": "

The name of the repository that you want to associate with the template.

", + "BatchAssociateApprovalRuleTemplateWithRepositoriesError$repositoryName": "

The name of the repository where the association was not made.

", "BatchDescribeMergeConflictsInput$repositoryName": "

The name of the repository that contains the merge conflicts you want to review.

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesError$repositoryName": "

The name of the repository where the association with the template was not able to be removed.

", "BatchGetCommitsInput$repositoryName": "

The name of the repository that contains the commits.

", "CommentsForComparedCommit$repositoryName": "

The name of the repository that contains the compared commits.

", "CommentsForPullRequest$repositoryName": "

The name of the repository that contains the pull request.

", "CreateBranchInput$repositoryName": "

The name of the repository in which you want to create the new branch.

", - "CreateCommitInput$repositoryName": "

The name of the repository where you will create the commit.

", - "CreateRepositoryInput$repositoryName": "

The name of the new repository to be created.

The repository name must be unique across the calling AWS account. In addition, repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For a full description of the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix \".git\" is prohibited.

", + "CreateCommitInput$repositoryName": "

The name of the repository where you create the commit.

", + "CreateRepositoryInput$repositoryName": "

The name of the new repository to be created.

The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. For more information about the limits on repository names, see Limits in the AWS CodeCommit User Guide. The suffix .git is prohibited.

", "CreateUnreferencedMergeCommitInput$repositoryName": "

The name of the repository where you want to create the unreferenced merge commit.

", "DeleteBranchInput$repositoryName": "

The name of the repository that contains the branch to be deleted.

", "DeleteFileInput$repositoryName": "

The name of the repository that contains the file to delete.

", "DeleteRepositoryInput$repositoryName": "

The name of the repository to delete.

", "DescribeMergeConflictsInput$repositoryName": "

The name of the repository where you want to get information about a merge conflict.

", + "DisassociateApprovalRuleTemplateFromRepositoryInput$repositoryName": "

The name of the repository you want to disassociate from the template.

", "GetBlobInput$repositoryName": "

The name of the repository that contains the blob.

", "GetBranchInput$repositoryName": "

The name of the repository that contains the branch for which you want to retrieve information.

", "GetCommentsForComparedCommitInput$repositoryName": "

The name of the repository where you want to compare commits.

", @@ -2216,6 +2770,7 @@ "GetMergeOptionsInput$repositoryName": "

The name of the repository that contains the commits about which you want to get merge options.

", "GetRepositoryInput$repositoryName": "

The name of the repository to get information about.

", "GetRepositoryTriggersInput$repositoryName": "

The name of the repository for which the trigger is configured.

", + "ListAssociatedApprovalRuleTemplatesForRepositoryInput$repositoryName": "

The name of the repository for which you want to list all associated approval rule templates.

", "ListBranchesInput$repositoryName": "

The name of the repository that contains the branches.

", "ListPullRequestsInput$repositoryName": "

The name of the repository for which you want to list pull requests.

", "MergeBranchesByFastForwardInput$repositoryName": "

The name of the repository where you want to merge two branches.

", @@ -2242,7 +2797,7 @@ "TestRepositoryTriggersInput$repositoryName": "

The name of the repository in which to test the triggers.

", "UpdateDefaultBranchInput$repositoryName": "

The name of the repository to set or change the default branch for.

", "UpdateRepositoryDescriptionInput$repositoryName": "

The name of the repository to set or change the comment or description for.

", - "UpdateRepositoryNameInput$oldName": "

The existing name of the repository.

", + "UpdateRepositoryNameInput$oldName": "

The current name of the repository.

", "UpdateRepositoryNameInput$newName": "

The new name for the repository.

" } }, @@ -2266,16 +2821,21 @@ "RepositoryNameList": { "base": null, "refs": { - "BatchGetRepositoriesInput$repositoryNames": "

The names of the repositories to get information about.

" + "BatchAssociateApprovalRuleTemplateWithRepositoriesInput$repositoryNames": "

The names of the repositories you want to associate with the template.

The length constraint limit is for each string in the array. The array itself can be empty.

", + "BatchAssociateApprovalRuleTemplateWithRepositoriesOutput$associatedRepositoryNames": "

A list of names of the repositories that have been associated with the template.
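For the batch operations above, the output splits results into the names that were associated (or disassociated) and per-repository errors. A rough sketch using the preview SDK's request/Send pattern follows; the ApprovalRuleTemplateName and Errors member names are assumptions based on the usual code generation, not something shown in this hunk.

```go
// Sketch only: ApprovalRuleTemplateName and Errors are assumed member names.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := codecommit.New(cfg)

	req := svc.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(
		&codecommit.BatchAssociateApprovalRuleTemplateWithRepositoriesInput{
			ApprovalRuleTemplateName: aws.String("two-approvals"), // assumed field name
			RepositoryNames:          []string{"repo-a", "repo-b"},
		})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// AssociatedRepositoryNames lists the repositories that were associated;
	// the error list (assumed to be named Errors) reports the ones that were not.
	for _, name := range resp.AssociatedRepositoryNames {
		fmt.Println("associated:", name)
	}
	for _, e := range resp.Errors {
		if e.RepositoryName != nil {
			fmt.Println("not associated:", *e.RepositoryName)
		}
	}
}
```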

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput$repositoryNames": "

The repository names that you want to disassociate from the approval rule template.

The length constraint limit is for each string in the array. The array itself can be empty.

", + "BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput$disassociatedRepositoryNames": "

A list of repository names that have had their association with the template removed.

", + "BatchGetRepositoriesInput$repositoryNames": "

The names of the repositories to get information about.

The length constraint limit is for each string in the array. The array itself can be empty.

", + "ListRepositoriesForApprovalRuleTemplateOutput$repositoryNames": "

A list of repository names that are associated with the specified approval rule template.
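Because paginators-1.json (further down in this diff) registers nextToken/maxResults for ListRepositoriesForApprovalRuleTemplate, the list can also be walked by hand. A sketch under the same assumptions as above, with the ApprovalRuleTemplateName input field assumed:

```go
// Sketch only: the ApprovalRuleTemplateName field name is assumed; the
// repositoryNames and nextToken members come from this diff.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := codecommit.New(cfg)

	input := &codecommit.ListRepositoriesForApprovalRuleTemplateInput{
		ApprovalRuleTemplateName: aws.String("two-approvals"), // assumed field name
	}
	for {
		resp, err := svc.ListRepositoriesForApprovalRuleTemplateRequest(input).Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range resp.RepositoryNames {
			fmt.Println(name)
		}
		if resp.NextToken == nil { // no more pages
			break
		}
		input.NextToken = resp.NextToken
	}
}
```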

" } }, "RepositoryNameRequiredException": { - "base": "

A repository name is required but was not specified.

", + "base": "

A repository name is required, but was not specified.

", "refs": { } }, "RepositoryNamesRequiredException": { - "base": "

A repository names object is required but was not specified.

", + "base": "

At least one repository name object is required, but was not specified.

", "refs": { } }, @@ -2297,18 +2857,18 @@ } }, "RepositoryTriggerBranchNameListRequiredException": { - "base": "

At least one branch name is required but was not specified in the trigger configuration.

", + "base": "

At least one branch name is required, but was not specified in the trigger configuration.

", "refs": { } }, "RepositoryTriggerCustomData": { "base": null, "refs": { - "RepositoryTrigger$customData": "

Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.

" + "RepositoryTrigger$customData": "

Any custom data associated with the trigger to be included in the information sent to the target of the trigger.

" } }, "RepositoryTriggerDestinationArnRequiredException": { - "base": "

A destination ARN for the target service for the trigger is required but was not specified.

", + "base": "

A destination ARN for the target service for the trigger is required, but was not specified.

", "refs": { } }, @@ -2321,11 +2881,11 @@ "RepositoryTriggerEventList": { "base": null, "refs": { - "RepositoryTrigger$events": "

The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.

The valid value \"all\" cannot be used with any other values.

" + "RepositoryTrigger$events": "

The repository events that cause the trigger to run actions in another service, such as sending a notification through Amazon SNS.

The valid value \"all\" cannot be used with any other values.

" } }, "RepositoryTriggerEventsListRequiredException": { - "base": "

At least one event for the trigger is required but was not specified.

", + "base": "

At least one event for the trigger is required, but was not specified.

", "refs": { } }, @@ -2338,13 +2898,13 @@ "RepositoryTriggerExecutionFailureList": { "base": null, "refs": { - "TestRepositoryTriggersOutput$failedExecutions": "

The list of triggers that were not able to be tested. This list provides the names of the triggers that could not be tested, separated by commas.

" + "TestRepositoryTriggersOutput$failedExecutions": "

The list of triggers that were not tested. This list provides the names of the triggers that could not be tested, separated by commas.

" } }, "RepositoryTriggerExecutionFailureMessage": { "base": null, "refs": { - "RepositoryTriggerExecutionFailure$failureMessage": "

Additional message information about the trigger that did not run.

" + "RepositoryTriggerExecutionFailure$failureMessage": "

Message information about the trigger that did not run.

" } }, "RepositoryTriggerName": { @@ -2362,7 +2922,7 @@ } }, "RepositoryTriggerNameRequiredException": { - "base": "

A name for the trigger is required but was not specified.

", + "base": "

A name for the trigger is required, but was not specified.

", "refs": { } }, @@ -2382,7 +2942,7 @@ } }, "RepositoryTriggersListRequiredException": { - "base": "

The list of triggers for the repository is required but was not specified.

", + "base": "

The list of triggers for the repository is required, but was not specified.

", "refs": { } }, @@ -2404,6 +2964,38 @@ "refs": { } }, + "RevisionId": { + "base": null, + "refs": { + "ApprovalRuleOverriddenEventMetadata$revisionId": "

The revision ID of the pull request when the override event occurred.

", + "ApprovalStateChangedEventMetadata$revisionId": "

The revision ID of the pull request when the approval state changed.

", + "EvaluatePullRequestApprovalRulesInput$revisionId": "

The system-generated ID for the pull request revision. To retrieve the most recent revision ID for a pull request, use GetPullRequest.

", + "GetPullRequestApprovalStatesInput$revisionId": "

The system-generated ID for the pull request revision.

", + "GetPullRequestOverrideStateInput$revisionId": "

The system-generated ID of the revision for the pull request. To retrieve the most recent revision ID, use GetPullRequest.

", + "OverridePullRequestApprovalRulesInput$revisionId": "

The system-generated ID of the most recent revision of the pull request. You cannot override approval rules for anything but the most recent revision of a pull request. To get the revision ID, use GetPullRequest.

", + "PullRequest$revisionId": "

The system-generated revision ID for the pull request.

", + "UpdatePullRequestApprovalStateInput$revisionId": "

The system-generated ID of the revision.
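The revision ID ties an approval to a specific state of the pull request. A minimal sketch of recording an approval with the preview SDK's request/Send pattern is below; as the entries above note, GetPullRequest supplies the current revision ID, while the ApprovalState Go type and the APPROVE value are assumptions from the published CodeCommit API rather than something this hunk shows.

```go
// Sketch only: the ApprovalState type name and its values are assumed from
// the usual aws-sdk-go-v2 code-generation conventions.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := codecommit.New(cfg)

	// GetPullRequest returns the most recent revision ID for the pull request.
	pr, err := svc.GetPullRequestRequest(&codecommit.GetPullRequestInput{
		PullRequestId: aws.String("42"),
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Record an approval against that exact revision; a stale revision ID is
	// rejected (see RevisionNotCurrentException below).
	_, err = svc.UpdatePullRequestApprovalStateRequest(&codecommit.UpdatePullRequestApprovalStateInput{
		PullRequestId: aws.String("42"),
		RevisionId:    pr.PullRequest.RevisionId,
		ApprovalState: codecommit.ApprovalState("APPROVE"), // assumed type name and value
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
}
```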

" + } + }, + "RevisionIdRequiredException": { + "base": "

A revision ID is required, but was not provided.

", + "refs": { + } + }, + "RevisionNotCurrentException": { + "base": "

The revision ID provided in the request does not match the current revision ID. Use GetPullRequest to retrieve the current revision ID.

", + "refs": { + } + }, + "RuleContentSha256": { + "base": null, + "refs": { + "ApprovalRule$ruleContentSha256": "

The SHA-256 hash signature for the content of the approval rule.

", + "ApprovalRuleTemplate$ruleContentSha256": "

The SHA-256 hash signature for the content of the approval rule template.

", + "UpdateApprovalRuleTemplateContentInput$existingRuleContentSha256": "

The SHA-256 hash signature for the content of the approval rule. You can retrieve this information by using GetPullRequest.

", + "UpdatePullRequestApprovalRuleContentInput$existingRuleContentSha256": "

The SHA-256 hash signature for the content of the approval rule. You can retrieve this information by using GetPullRequest.
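These hash members act as an optimistic-concurrency check when editing rule content: you send back the SHA-256 you last read, and the update is rejected if the rule changed in the meantime. A hedged sketch against UpdatePullRequestApprovalRuleContent follows; the ApprovalRuleName and NewRuleContent field names are assumed from the operation's input shape, which is not part of this hunk.

```go
// Sketch only: ApprovalRuleName and NewRuleContent are assumed field names;
// ExistingRuleContentSha256 is the member documented above.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := codecommit.New(cfg)

	// The SHA-256 acts as an optimistic-concurrency check: the update is
	// rejected if the rule content changed since the hash was read.
	req := svc.UpdatePullRequestApprovalRuleContentRequest(
		&codecommit.UpdatePullRequestApprovalRuleContentInput{
			PullRequestId:             aws.String("42"),
			ApprovalRuleName:          aws.String("require-two-approvers"), // assumed field name
			ExistingRuleContentSha256: aws.String("2f8e..."),               // hash previously returned for this rule
			NewRuleContent:            aws.String("{ ... updated rule document ... }"), // assumed field name
		})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```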

" + } + }, "SameFileContentException": { "base": "

The file was not added or updated because the content of the file is exactly the same as the content of that file in the repository and branch that you specified.

", "refs": { @@ -2417,7 +3009,7 @@ "SetFileModeEntries": { "base": null, "refs": { - "ConflictResolution$setFileModes": "

File modes that will be set as part of the merge conflict resolution.

", + "ConflictResolution$setFileModes": "

File modes that are set as part of the merge conflict resolution.

", "CreateCommitInput$setFileModes": "

The file modes to update for files in this commit.

" } }, @@ -2434,7 +3026,7 @@ } }, "SourceAndDestinationAreSameException": { - "base": "

The source branch and the destination branch for the pull request are the same. You must specify different branches for the source and destination.

", + "base": "

The source branch and destination branch for the pull request are the same. You must specify different branches for the source and destination.

", "refs": { } }, @@ -2458,7 +3050,7 @@ "SubModuleList": { "base": null, "refs": { - "GetFolderOutput$subModules": "

The list of submodules that exist in the specified folder, if any.

" + "GetFolderOutput$subModules": "

The list of submodules in the specified folder, if any.

" } }, "SymbolicLink": { @@ -2470,7 +3062,7 @@ "SymbolicLinkList": { "base": null, "refs": { - "GetFolderOutput$symbolicLinks": "

The list of symbolic links to other files and folders that exist in the specified folder, if any.

" + "GetFolderOutput$symbolicLinks": "

The list of symbolic links to other files and folders in the specified folder, if any.

" } }, "TagKey": { @@ -2529,7 +3121,7 @@ "TargetList": { "base": null, "refs": { - "CreatePullRequestInput$targets": "

The targets for the pull request, including the source of the code to be reviewed (the source branch), and the destination where the creator of the pull request intends the code to be merged after the pull request is closed (the destination branch).

" + "CreatePullRequestInput$targets": "

The targets for the pull request, including the source of the code to be reviewed (the source branch) and the destination where the creator of the pull request intends the code to be merged after the pull request is closed (the destination branch).

" } }, "TargetRequiredException": { @@ -2565,9 +3157,9 @@ "Title": { "base": null, "refs": { - "CreatePullRequestInput$title": "

The title of the pull request. This title will be used to identify the pull request to other users in the repository.

", - "PullRequest$title": "

The user-defined title of the pull request. This title is displayed in the list of pull requests to other users of the repository.

", - "UpdatePullRequestTitleInput$title": "

The updated title of the pull request. This will replace the existing title.

" + "CreatePullRequestInput$title": "

The title of the pull request. This title is used to identify the pull request to other users in the repository.

", + "PullRequest$title": "

The user-defined title of the pull request. This title is displayed in the list of pull requests to other repository users.

", + "UpdatePullRequestTitleInput$title": "

The updated title of the pull request. This replaces the existing title.

" } }, "TitleRequiredException": { @@ -2585,6 +3177,36 @@ "refs": { } }, + "UpdateApprovalRuleTemplateContentInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateContentOutput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateDescriptionInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateDescriptionOutput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateNameInput": { + "base": null, + "refs": { + } + }, + "UpdateApprovalRuleTemplateNameOutput": { + "base": null, + "refs": { + } + }, "UpdateCommentInput": { "base": null, "refs": { @@ -2600,6 +3222,21 @@ "refs": { } }, + "UpdatePullRequestApprovalRuleContentInput": { + "base": null, + "refs": { + } + }, + "UpdatePullRequestApprovalRuleContentOutput": { + "base": null, + "refs": { + } + }, + "UpdatePullRequestApprovalStateInput": { + "base": null, + "refs": { + } + }, "UpdatePullRequestDescriptionInput": { "base": null, "refs": { diff --git a/models/apis/codecommit/2015-04-13/paginators-1.json b/models/apis/codecommit/2015-04-13/paginators-1.json index c917eaa80e8..5fcda36b895 100644 --- a/models/apis/codecommit/2015-04-13/paginators-1.json +++ b/models/apis/codecommit/2015-04-13/paginators-1.json @@ -30,6 +30,16 @@ "limit_key": "maxConflictFiles", "output_token": "nextToken" }, + "ListApprovalRuleTemplates": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, + "ListAssociatedApprovalRuleTemplatesForRepository": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" + }, "ListBranches": { "input_token": "nextToken", "output_token": "nextToken", @@ -44,6 +54,11 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "repositories" + }, + "ListRepositoriesForApprovalRuleTemplate": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken" } } } \ No newline at end of file diff --git a/models/apis/cognito-idp/2016-04-18/api-2.json b/models/apis/cognito-idp/2016-04-18/api-2.json index 3e1fab4ec33..e9c26736531 100644 --- a/models/apis/cognito-idp/2016-04-18/api-2.json +++ b/models/apis/cognito-idp/2016-04-18/api-2.json @@ -2571,7 +2571,8 @@ "REFRESH_TOKEN", "CUSTOM_AUTH", "ADMIN_NO_SRP_AUTH", - "USER_PASSWORD_AUTH" + "USER_PASSWORD_AUTH", + "ADMIN_USER_PASSWORD_AUTH" ] }, "AuthParametersType":{ @@ -2960,7 +2961,8 @@ "AllowedOAuthFlows":{"shape":"OAuthFlowsType"}, "AllowedOAuthScopes":{"shape":"ScopeListType"}, "AllowedOAuthFlowsUserPoolClient":{"shape":"BooleanType"}, - "AnalyticsConfiguration":{"shape":"AnalyticsConfigurationType"} + "AnalyticsConfiguration":{"shape":"AnalyticsConfigurationType"}, + "PreventUserExistenceErrors":{"shape":"PreventUserExistenceErrorTypes"} } }, "CreateUserPoolClientResponse":{ @@ -3357,7 +3359,9 @@ "members":{ "SourceArn":{"shape":"ArnType"}, "ReplyToEmailAddress":{"shape":"EmailAddressType"}, - "EmailSendingAccount":{"shape":"EmailSendingAccountType"} + "EmailSendingAccount":{"shape":"EmailSendingAccountType"}, + "From":{"shape":"StringType"}, + "ConfigurationSet":{"shape":"SESConfigurationSet"} } }, "EmailNotificationBodyType":{ @@ -3488,7 +3492,12 @@ "enum":[ "ADMIN_NO_SRP_AUTH", "CUSTOM_AUTH_FLOW_ONLY", - "USER_PASSWORD_AUTH" + "USER_PASSWORD_AUTH", + "ALLOW_ADMIN_USER_PASSWORD_AUTH", + "ALLOW_CUSTOM_AUTH", + "ALLOW_USER_PASSWORD_AUTH", + "ALLOW_USER_SRP_AUTH", + "ALLOW_REFRESH_TOKEN_AUTH" ] }, "FeedbackValueType":{ @@ -4218,6 +4227,13 @@ }, "exception":true }, + "PreventUserExistenceErrorTypes":{ 
+ "type":"string", + "enum":[ + "LEGACY", + "ENABLED" + ] + }, "ProviderDescription":{ "type":"structure", "members":{ @@ -4424,6 +4440,12 @@ "min":3, "pattern":"^[0-9A-Za-z\\.\\-_]*(?Removes the specified tags from an Amazon Cognito user pool. You can use this action up to 5 times per second, per account

", "UpdateAuthEventFeedback": "

Provides the feedback for an authentication event whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

", "UpdateDeviceStatus": "

Updates the device status.

", - "UpdateGroup": "

Updates the specified group with the specified attributes.

Calling this action requires developer credentials.

", + "UpdateGroup": "

Updates the specified group with the specified attributes.

Calling this action requires developer credentials.

If you don't provide a value for an attribute, it will be set to the default value.

", "UpdateIdentityProvider": "

Updates identity provider information for a user pool.

", - "UpdateResourceServer": "

Updates the name and scopes of resource server. All other fields are read-only.

", + "UpdateResourceServer": "

Updates the name and scopes of resource server. All other fields are read-only.

If you don't provide a value for an attribute, it will be set to the default value.

", "UpdateUserAttributes": "

Allows a user to update a specific attribute (one at a time).

", - "UpdateUserPool": "

Updates the specified user pool with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool settings with .

", - "UpdateUserPoolClient": "

Updates the specified user pool app client with the specified attributes. If you don't provide a value for an attribute, it will be set to the default value. You can get a list of the current user pool app client settings with .

", + "UpdateUserPool": "

Updates the specified user pool with the specified attributes. You can get a list of the current user pool settings with .

If you don't provide a value for an attribute, it will be set to the default value.

", + "UpdateUserPoolClient": "

Updates the specified user pool app client with the specified attributes. You can get a list of the current user pool app client settings with .

If you don't provide a value for an attribute, it will be set to the default value.

", "UpdateUserPoolDomain": "

Updates the Secure Sockets Layer (SSL) certificate for the custom domain for your user pool.

You can use this operation to provide the Amazon Resource Name (ARN) of a new certificate to Amazon Cognito. You cannot use it to change the domain for a user pool.

A custom domain is used to host the Amazon Cognito hosted UI, which provides sign-up and sign-in pages for your application. When you set up a custom domain, you provide a certificate that you manage with AWS Certificate Manager (ACM). When necessary, you can use this operation to change the certificate that you applied to your custom domain.

Usually, this is unnecessary following routine certificate renewal with ACM. When you renew your existing certificate in ACM, the ARN for your certificate remains the same, and your custom domain uses the new certificate automatically.

However, if you replace your existing certificate with a new one, ACM gives the new certificate a new ARN. To apply the new certificate to your custom domain, you must provide this ARN to Amazon Cognito.

When you add your new certificate in ACM, you must choose US East (N. Virginia) as the AWS Region.

After you submit your request, Amazon Cognito requires up to 1 hour to distribute your new certificate to your custom domain.

For more information about adding a custom domain to your user pool, see Using Your Own Domain for the Hosted UI.

", "VerifySoftwareToken": "

Use this API to register a user's entered TOTP code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

", "VerifyUserAttribute": "

Verifies the specified user attributes in the user pool.

" @@ -573,8 +573,8 @@ "AuthFlowType": { "base": null, "refs": { - "AdminInitiateAuthRequest$AuthFlow": "

The authentication flow for this call to execute. The API action will depend on this value. For example:

Valid values include:

", - "InitiateAuthRequest$AuthFlow": "

The authentication flow for this call to execute. The API action will depend on this value. For example:

Valid values include:

ADMIN_NO_SRP_AUTH is not a valid value.

" + "AdminInitiateAuthRequest$AuthFlow": "

The authentication flow for this call to execute. The API action will depend on this value. For example:

Valid values include:

", + "InitiateAuthRequest$AuthFlow": "

The authentication flow for this call to execute. The API action will depend on this value. For example:

Valid values include:

ADMIN_NO_SRP_AUTH is not a valid value.
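Alongside these documentation updates, the api-2.json hunk earlier in this diff adds ADMIN_USER_PASSWORD_AUTH to the AuthFlowType enum, a server-side username/password flow for AdminInitiateAuth. A hedged sketch, assuming the generated AuthFlowType string type and the standard USERNAME/PASSWORD auth parameters:

```go
// Sketch only: the AuthFlowType Go type name is assumed from the shape name;
// the pool and client IDs are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cognitoidentityprovider.New(cfg)

	req := svc.AdminInitiateAuthRequest(&cognitoidentityprovider.AdminInitiateAuthInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"),
		ClientId:   aws.String("exampleclientid"),
		// ADMIN_USER_PASSWORD_AUTH is the new server-side username/password flow.
		AuthFlow: cognitoidentityprovider.AuthFlowType("ADMIN_USER_PASSWORD_AUTH"),
		AuthParameters: map[string]string{
			"USERNAME": "jane",
			"PASSWORD": "Placeholder-Passw0rd",
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("next challenge:", resp.ChallengeName)
}
```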

" } }, "AuthParametersType": { @@ -744,7 +744,7 @@ "AdminResetUserPasswordRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminResetUserPassword API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminResetUserPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "AdminRespondToAuthChallengeRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminRespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, post authentication, user migration, pre token generation, define auth challenge, create auth challenge, and verify auth challenge response. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminRespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "AdminUpdateUserAttributesRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the AdminUpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your AdminUpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", - "ConfirmForgotPasswordRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the functions that are assigned to the post confirmation and pre mutation triggers. When Amazon Cognito invokes either of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", + "ConfirmForgotPasswordRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "ConfirmSignUpRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ConfirmSignUp API action, Amazon Cognito invokes the function that is assigned to the post confirmation trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ConfirmSignUp request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "ForgotPasswordRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ForgotPassword API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and user migration. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ForgotPassword request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "GetUserAttributeVerificationCodeRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the GetUserAttributeVerificationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your GetUserAttributeVerificationCode request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", @@ -752,7 +752,7 @@ "ResendConfirmationCodeRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the ResendConfirmationCode API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your ResendConfirmationCode request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "RespondToAuthChallengeRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the RespondToAuthChallenge API action, Amazon Cognito invokes any functions that are assigned to the following triggers: post authentication, pre token generation, define auth challenge, create auth challenge, and verify auth challenge. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your RespondToAuthChallenge request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", "SignUpRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the SignUp API action, Amazon Cognito invokes any functions that are assigned to the following triggers: pre sign-up, custom message, and post confirmation. When Amazon Cognito invokes any of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your SignUp request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

", - "UpdateUserAttributesRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the functions that are assigned to the custom message and pre mutation triggers. When Amazon Cognito invokes either of these functions, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

" + "UpdateUserAttributesRequest$ClientMetadata": "

A map of custom key-value pairs that you can provide as input for any custom workflows that this action triggers.

You create custom workflows by assigning AWS Lambda functions to user pool triggers. When you use the UpdateUserAttributes API action, Amazon Cognito invokes the function that is assigned to the custom message trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a clientMetadata attribute, which provides the data that you assigned to the ClientMetadata parameter in your UpdateUserAttributes request. In your function code in AWS Lambda, you can process the clientMetadata value to enhance your workflow for your specific needs.

For more information, see Customizing User Pool Workflows with Lambda Triggers in the Amazon Cognito Developer Guide.

Take the following limitations into consideration when you use the ClientMetadata parameter:

" } }, "ClientNameType": { @@ -1378,9 +1378,9 @@ "ExplicitAuthFlowsListType": { "base": null, "refs": { - "CreateUserPoolClientRequest$ExplicitAuthFlows": "

The explicit authentication flows.

", - "UpdateUserPoolClientRequest$ExplicitAuthFlows": "

Explicit authentication flows.

", - "UserPoolClientType$ExplicitAuthFlows": "

The explicit authentication flows.

" + "CreateUserPoolClientRequest$ExplicitAuthFlows": "

The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_ prefix are deprecated in favor of new names with the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along with values without ALLOW_ prefix.

Valid values include:

", + "UpdateUserPoolClientRequest$ExplicitAuthFlows": "

The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_ prefix are deprecated in favor of new names with the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along with values without ALLOW_ prefix.

Valid values include:

", + "UserPoolClientType$ExplicitAuthFlows": "

The authentication flows that are supported by the user pool clients. Flow names without the ALLOW_ prefix are deprecated in favor of new names with the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along with values without ALLOW_ prefix.

Valid values include:

" } }, "ExplicitAuthFlowsType": { @@ -2033,6 +2033,14 @@ "refs": { } }, + "PreventUserExistenceErrorTypes": { + "base": null, + "refs": { + "CreateUserPoolClientRequest$PreventUserExistenceErrors": "

Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs will return a UserNotFoundException exception if the user does not exist in the user pool.

Valid values include:

This setting affects the behavior of following APIs:

After January 1st 2020, the value of PreventUserExistenceErrors will default to ENABLED for newly created user pool clients if no value is provided.

", + "UpdateUserPoolClientRequest$PreventUserExistenceErrors": "

Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs will return a UserNotFoundException exception if the user does not exist in the user pool.

Valid values include:

This setting affects the behavior of following APIs:

After January 1st 2020, the value of PreventUserExistenceErrors will default to ENABLED for newly created user pool clients if no value is provided.

", + "UserPoolClientType$PreventUserExistenceErrors": "

Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to ENABLED and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to LEGACY, those APIs will return a UserNotFoundException exception if the user does not exist in the user pool.

Valid values include:

This setting affects the behavior of following APIs:

After January 1st 2020, the value of PreventUserExistenceErrors will default to ENABLED for newly created user pool clients if no value is provided.
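Both of the new client settings in this file, the ALLOW_-prefixed ExplicitAuthFlows values and PreventUserExistenceErrors, are plain fields on CreateUserPoolClient and UpdateUserPoolClient. A minimal sketch, assuming the generated Go type names for the two enums (the raw string values are the ones listed in api-2.json):

```go
// Sketch only: the enum type names (ExplicitAuthFlowsType,
// PreventUserExistenceErrorTypes) are assumed from the shape names.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cognitoidentityprovider.New(cfg)

	req := svc.CreateUserPoolClientRequest(&cognitoidentityprovider.CreateUserPoolClientInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"),
		ClientName: aws.String("my-app"),
		// New ALLOW_-prefixed flow names; they cannot be mixed with the
		// legacy names in the same request.
		ExplicitAuthFlows: []cognitoidentityprovider.ExplicitAuthFlowsType{
			cognitoidentityprovider.ExplicitAuthFlowsType("ALLOW_USER_SRP_AUTH"),
			cognitoidentityprovider.ExplicitAuthFlowsType("ALLOW_REFRESH_TOKEN_AUTH"),
		},
		// Mask "user does not exist" details in auth, confirmation, and
		// password-recovery responses.
		PreventUserExistenceErrors: cognitoidentityprovider.PreventUserExistenceErrorTypes("ENABLED"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
	log.Println("user pool client created")
}
```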

" + } + }, "ProviderDescription": { "base": "

A container for identity provider details.

", "refs": { @@ -2231,6 +2239,12 @@ "DomainDescriptionType$S3Bucket": "

The S3 bucket where the static files for this domain are stored.

" } }, + "SESConfigurationSet": { + "base": null, + "refs": { + "EmailConfigurationType$ConfigurationSet": "

The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:

" + } + }, "SMSMfaSettingsType": { "base": "

The type used for enabling SMS MFA at the user level.

", "refs": { @@ -2503,6 +2517,7 @@ "DeviceSecretVerifierConfigType$PasswordVerifier": "

The password verifier.

", "DeviceSecretVerifierConfigType$Salt": "

The salt.

", "DomainDescriptionType$CloudFrontDistribution": "

The ARN of the CloudFront distribution.

", + "EmailConfigurationType$From": "

Identifies either the sender’s email address or the sender’s name with their email address. For example, testuser@example.com or Test User <testuser@example.com>. This address will appear before the body of the email.
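Together with the ConfigurationSet member documented above, the new From field is set through the pool's EmailConfiguration. A hedged sketch via UpdateUserPool (note the warning earlier in this file that unspecified attributes are reset to their defaults); the struct and field names are assumed from the usual code generation for the members added in api-2.json.

```go
// Sketch only: EmailConfigurationType field names follow the usual
// code-generation pattern; ARNs and names are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cognitoidentityprovider.New(cfg)

	req := svc.UpdateUserPoolRequest(&cognitoidentityprovider.UpdateUserPoolInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"),
		EmailConfiguration: &cognitoidentityprovider.EmailConfigurationType{
			EmailSendingAccount: cognitoidentityprovider.EmailSendingAccountType("DEVELOPER"), // assumed enum type
			SourceArn:           aws.String("arn:aws:ses:us-east-1:123456789012:identity/example.com"),
			// New members: a display-name "From" address and an SES
			// configuration set applied to every outgoing message.
			From:             aws.String("Test User <testuser@example.com>"),
			ConfigurationSet: aws.String("my-ses-configuration-set"),
		},
	})
	// UpdateUserPool resets attributes you omit to their defaults, so in real
	// code the full desired pool configuration should be supplied.
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```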

", "EventContextDataType$IpAddress": "

The user's IP address.

", "EventContextDataType$DeviceName": "

The user's device name.

", "EventContextDataType$Timezone": "

The user's time zone.

", diff --git a/models/apis/config/2014-11-12/api-2.json b/models/apis/config/2014-11-12/api-2.json index c70b178d44a..97d0dba03bd 100644 --- a/models/apis/config/2014-11-12/api-2.json +++ b/models/apis/config/2014-11-12/api-2.json @@ -84,6 +84,18 @@ {"shape":"NoSuchConfigurationRecorderException"} ] }, + "DeleteConformancePack":{ + "name":"DeleteConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteConformancePackRequest"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"ResourceInUseException"} + ] + }, "DeleteDeliveryChannel":{ "name":"DeleteDeliveryChannel", "http":{ @@ -122,6 +134,19 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "DeleteOrganizationConformancePack":{ + "name":"DeleteOrganizationConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteOrganizationConformancePackRequest"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"ResourceInUseException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "DeletePendingAggregationRequest":{ "name":"DeletePendingAggregationRequest", "http":{ @@ -321,6 +346,49 @@ {"shape":"NoSuchConfigurationRecorderException"} ] }, + "DescribeConformancePackCompliance":{ + "name":"DescribeConformancePackCompliance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePackComplianceRequest"}, + "output":{"shape":"DescribeConformancePackComplianceResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"NoSuchConfigRuleInConformancePackException"}, + {"shape":"NoSuchConformancePackException"} + ] + }, + "DescribeConformancePackStatus":{ + "name":"DescribeConformancePackStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePackStatusRequest"}, + "output":{"shape":"DescribeConformancePackStatusResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, + "DescribeConformancePacks":{ + "name":"DescribeConformancePacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeConformancePacksRequest"}, + "output":{"shape":"DescribeConformancePacksResponse"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, "DescribeDeliveryChannelStatus":{ "name":"DescribeDeliveryChannelStatus", "http":{ @@ -375,6 +443,36 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "DescribeOrganizationConformancePackStatuses":{ + "name":"DescribeOrganizationConformancePackStatuses", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrganizationConformancePackStatusesRequest"}, + "output":{"shape":"DescribeOrganizationConformancePackStatusesResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, + "DescribeOrganizationConformancePacks":{ + "name":"DescribeOrganizationConformancePacks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeOrganizationConformancePacksRequest"}, + "output":{"shape":"DescribeOrganizationConformancePacksResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + 
{"shape":"InvalidNextTokenException"}, + {"shape":"InvalidLimitException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "DescribePendingAggregationRequests":{ "name":"DescribePendingAggregationRequests", "http":{ @@ -544,6 +642,36 @@ {"shape":"InvalidParameterValueException"} ] }, + "GetConformancePackComplianceDetails":{ + "name":"GetConformancePackComplianceDetails", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConformancePackComplianceDetailsRequest"}, + "output":{"shape":"GetConformancePackComplianceDetailsResponse"}, + "errors":[ + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"NoSuchConformancePackException"}, + {"shape":"NoSuchConfigRuleInConformancePackException"}, + {"shape":"InvalidParameterValueException"} + ] + }, + "GetConformancePackComplianceSummary":{ + "name":"GetConformancePackComplianceSummary", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetConformancePackComplianceSummaryRequest"}, + "output":{"shape":"GetConformancePackComplianceSummaryResponse"}, + "errors":[ + {"shape":"NoSuchConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"} + ] + }, "GetDiscoveredResourceCounts":{ "name":"GetDiscoveredResourceCounts", "http":{ @@ -573,6 +701,21 @@ {"shape":"OrganizationAccessDeniedException"} ] }, + "GetOrganizationConformancePackDetailedStatus":{ + "name":"GetOrganizationConformancePackDetailedStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetOrganizationConformancePackDetailedStatusRequest"}, + "output":{"shape":"GetOrganizationConformancePackDetailedStatusResponse"}, + "errors":[ + {"shape":"NoSuchOrganizationConformancePackException"}, + {"shape":"InvalidLimitException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"OrganizationAccessDeniedException"} + ] + }, "GetResourceConfigHistory":{ "name":"GetResourceConfigHistory", "http":{ @@ -693,6 +836,22 @@ {"shape":"InvalidRecordingGroupException"} ] }, + "PutConformancePack":{ + "name":"PutConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutConformancePackRequest"}, + "output":{"shape":"PutConformancePackResponse"}, + "errors":[ + {"shape":"InsufficientPermissionsException"}, + {"shape":"ConformancePackTemplateValidationException"}, + {"shape":"ResourceInUseException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"MaxNumberOfConformancePacksExceededException"} + ] + }, "PutDeliveryChannel":{ "name":"PutDeliveryChannel", "http":{ @@ -743,6 +902,25 @@ {"shape":"InsufficientPermissionsException"} ] }, + "PutOrganizationConformancePack":{ + "name":"PutOrganizationConformancePack", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutOrganizationConformancePackRequest"}, + "output":{"shape":"PutOrganizationConformancePackResponse"}, + "errors":[ + {"shape":"MaxNumberOfOrganizationConformancePacksExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ValidationException"}, + {"shape":"OrganizationAccessDeniedException"}, + {"shape":"InsufficientPermissionsException"}, + {"shape":"OrganizationConformancePackTemplateValidationException"}, + {"shape":"OrganizationAllFeaturesNotEnabledException"}, + {"shape":"NoAvailableOrganizationException"} + ] + }, "PutRemediationConfigurations":{ "name":"PutRemediationConfigurations", "http":{ @@ -1015,6 +1193,11 @@ "max":256, "min":1 }, + "Annotation":{ + "type":"string", + "max":256, + "min":0 + 
}, "AutoRemediationAttemptSeconds":{ "type":"long", "box":true, @@ -1398,6 +1581,211 @@ "member":{"shape":"ConfigurationRecorderStatus"} }, "ConfigurationStateId":{"type":"string"}, + "ConformancePackArn":{ + "type":"string", + "max":2048, + "min":1 + }, + "ConformancePackComplianceFilters":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConformancePackConfigRuleNames"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackComplianceResourceIds":{ + "type":"list", + "member":{"shape":"StringWithCharLimit256"}, + "max":5, + "min":0 + }, + "ConformancePackComplianceSummary":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackComplianceStatus" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackComplianceStatus":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackComplianceSummaryList":{ + "type":"list", + "member":{"shape":"ConformancePackComplianceSummary"}, + "max":5, + "min":1 + }, + "ConformancePackComplianceType":{ + "type":"string", + "enum":[ + "COMPLIANT", + "NON_COMPLIANT" + ] + }, + "ConformancePackConfigRuleNames":{ + "type":"list", + "member":{"shape":"StringWithCharLimit64"}, + "max":10, + "min":0 + }, + "ConformancePackDetail":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackArn", + "ConformancePackId", + "DeliveryS3Bucket" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackArn":{"shape":"ConformancePackArn"}, + "ConformancePackId":{"shape":"ConformancePackId"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "LastUpdateRequestedTime":{"shape":"Date"}, + "CreatedBy":{"shape":"StringWithCharLimit256"} + } + }, + "ConformancePackDetailList":{ + "type":"list", + "member":{"shape":"ConformancePackDetail"}, + "max":25, + "min":0 + }, + "ConformancePackEvaluationFilters":{ + "type":"structure", + "members":{ + "ConfigRuleNames":{"shape":"ConformancePackConfigRuleNames"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"}, + "ResourceType":{"shape":"StringWithCharLimit256"}, + "ResourceIds":{"shape":"ConformancePackComplianceResourceIds"} + } + }, + "ConformancePackEvaluationResult":{ + "type":"structure", + "required":[ + "ComplianceType", + "EvaluationResultIdentifier", + "ConfigRuleInvokedTime", + "ResultRecordedTime" + ], + "members":{ + "ComplianceType":{"shape":"ConformancePackComplianceType"}, + "EvaluationResultIdentifier":{"shape":"EvaluationResultIdentifier"}, + "ConfigRuleInvokedTime":{"shape":"Date"}, + "ResultRecordedTime":{"shape":"Date"}, + "Annotation":{"shape":"Annotation"} + } + }, + "ConformancePackId":{ + "type":"string", + "max":1024, + "min":1 + }, + "ConformancePackInputParameter":{ + "type":"structure", + "required":[ + "ParameterName", + "ParameterValue" + ], + "members":{ + "ParameterName":{"shape":"ParameterName"}, + "ParameterValue":{"shape":"ParameterValue"} + } + }, + "ConformancePackInputParameters":{ + "type":"list", + "member":{"shape":"ConformancePackInputParameter"}, + "max":60, + "min":0 + }, + "ConformancePackName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "ConformancePackNamesList":{ + "type":"list", + "member":{"shape":"ConformancePackName"}, + "max":25, + "min":0 + }, + "ConformancePackNamesToSummarizeList":{ + "type":"list", + 
"member":{"shape":"ConformancePackName"}, + "max":5, + "min":1 + }, + "ConformancePackRuleCompliance":{ + "type":"structure", + "members":{ + "ConfigRuleName":{"shape":"ConfigRuleName"}, + "ComplianceType":{"shape":"ConformancePackComplianceType"} + } + }, + "ConformancePackRuleComplianceList":{ + "type":"list", + "member":{"shape":"ConformancePackRuleCompliance"}, + "max":1000, + "min":0 + }, + "ConformancePackRuleEvaluationResultsList":{ + "type":"list", + "member":{"shape":"ConformancePackEvaluationResult"}, + "max":100, + "min":0 + }, + "ConformancePackState":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "DELETE_IN_PROGRESS", + "DELETE_FAILED" + ] + }, + "ConformancePackStatusDetail":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackId", + "ConformancePackArn", + "ConformancePackState", + "StackArn", + "LastUpdateRequestedTime" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackId":{"shape":"ConformancePackId"}, + "ConformancePackArn":{"shape":"ConformancePackArn"}, + "ConformancePackState":{"shape":"ConformancePackState"}, + "StackArn":{"shape":"StackArn"}, + "ConformancePackStatusReason":{"shape":"ConformancePackStatusReason"}, + "LastUpdateRequestedTime":{"shape":"Date"}, + "LastUpdateCompletedTime":{"shape":"Date"} + } + }, + "ConformancePackStatusDetailsList":{ + "type":"list", + "member":{"shape":"ConformancePackStatusDetail"}, + "max":25, + "min":0 + }, + "ConformancePackStatusReason":{ + "type":"string", + "max":2000, + "min":0 + }, + "ConformancePackTemplateValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "CosmosPageLimit":{ "type":"integer", "max":100, @@ -1436,6 +1824,13 @@ "ConfigurationRecorderName":{"shape":"RecorderName"} } }, + "DeleteConformancePackRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"} + } + }, "DeleteDeliveryChannelRequest":{ "type":"structure", "required":["DeliveryChannelName"], @@ -1462,6 +1857,13 @@ "OrganizationConfigRuleName":{"shape":"OrganizationConfigRuleName"} } }, + "DeleteOrganizationConformancePackRequest":{ + "type":"structure", + "required":["OrganizationConformancePackName"], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"} + } + }, "DeletePendingAggregationRequestRequest":{ "type":"structure", "required":[ @@ -1554,6 +1956,16 @@ "type":"list", "member":{"shape":"DeliveryChannelStatus"} }, + "DeliveryS3Bucket":{ + "type":"string", + "max":63, + "min":3 + }, + "DeliveryS3KeyPrefix":{ + "type":"string", + "max":1024, + "min":1 + }, "DeliveryStatus":{ "type":"string", "enum":[ @@ -1710,6 +2122,63 @@ "ConfigurationRecorders":{"shape":"ConfigurationRecorderList"} } }, + "DescribeConformancePackComplianceLimit":{ + "type":"integer", + "max":1000, + "min":0 + }, + "DescribeConformancePackComplianceRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "Filters":{"shape":"ConformancePackComplianceFilters"}, + "Limit":{"shape":"DescribeConformancePackComplianceLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackComplianceResponse":{ + "type":"structure", + "required":[ + "ConformancePackName", + "ConformancePackRuleComplianceList" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + 
"ConformancePackRuleComplianceList":{"shape":"ConformancePackRuleComplianceList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackStatusRequest":{ + "type":"structure", + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePackStatusResponse":{ + "type":"structure", + "members":{ + "ConformancePackStatusDetails":{"shape":"ConformancePackStatusDetailsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePacksRequest":{ + "type":"structure", + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeConformancePacksResponse":{ + "type":"structure", + "members":{ + "ConformancePackDetails":{"shape":"ConformancePackDetailList"}, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeDeliveryChannelStatusRequest":{ "type":"structure", "members":{ @@ -1764,6 +2233,36 @@ "NextToken":{"shape":"String"} } }, + "DescribeOrganizationConformancePackStatusesRequest":{ + "type":"structure", + "members":{ + "OrganizationConformancePackNames":{"shape":"OrganizationConformancePackNames"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePackStatusesResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackStatuses":{"shape":"OrganizationConformancePackStatuses"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePacksRequest":{ + "type":"structure", + "members":{ + "OrganizationConformancePackNames":{"shape":"OrganizationConformancePackNames"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "DescribeOrganizationConformancePacksResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePacks":{"shape":"OrganizationConformancePacks"}, + "NextToken":{"shape":"String"} + } + }, "DescribePendingAggregationRequestsLimit":{ "type":"integer", "max":20, @@ -2108,6 +2607,46 @@ "ComplianceSummariesByResourceType":{"shape":"ComplianceSummariesByResourceType"} } }, + "GetConformancePackComplianceDetailsLimit":{ + "type":"integer", + "max":100, + "min":0 + }, + "GetConformancePackComplianceDetailsRequest":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "Filters":{"shape":"ConformancePackEvaluationFilters"}, + "Limit":{"shape":"GetConformancePackComplianceDetailsLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceDetailsResponse":{ + "type":"structure", + "required":["ConformancePackName"], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "ConformancePackRuleEvaluationResults":{"shape":"ConformancePackRuleEvaluationResultsList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceSummaryRequest":{ + "type":"structure", + "required":["ConformancePackNames"], + "members":{ + "ConformancePackNames":{"shape":"ConformancePackNamesToSummarizeList"}, + "Limit":{"shape":"PageSizeLimit"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetConformancePackComplianceSummaryResponse":{ + "type":"structure", + "members":{ + "ConformancePackComplianceSummaryList":{"shape":"ConformancePackComplianceSummaryList"}, + "NextToken":{"shape":"NextToken"} + } + }, "GetDiscoveredResourceCountsRequest":{ "type":"structure", "members":{ @@ 
-2141,6 +2680,23 @@ "NextToken":{"shape":"String"} } }, + "GetOrganizationConformancePackDetailedStatusRequest":{ + "type":"structure", + "required":["OrganizationConformancePackName"], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "Filters":{"shape":"OrganizationResourceDetailedStatusFilters"}, + "Limit":{"shape":"CosmosPageLimit"}, + "NextToken":{"shape":"String"} + } + }, + "GetOrganizationConformancePackDetailedStatusResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackDetailedStatuses":{"shape":"OrganizationConformancePackDetailedStatuses"}, + "NextToken":{"shape":"String"} + } + }, "GetResourceConfigHistoryRequest":{ "type":"structure", "required":[ @@ -2357,6 +2913,12 @@ }, "exception":true }, + "MaxNumberOfConformancePacksExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaxNumberOfDeliveryChannelsExceededException":{ "type":"structure", "members":{ @@ -2369,6 +2931,12 @@ }, "exception":true }, + "MaxNumberOfOrganizationConformancePacksExceededException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "MaxNumberOfRetentionConfigurationsExceededException":{ "type":"structure", "members":{ @@ -2391,12 +2959,12 @@ "CREATE_SUCCESSFUL", "CREATE_IN_PROGRESS", "CREATE_FAILED", - "UPDATE_SUCCESSFUL", - "UPDATE_FAILED", - "UPDATE_IN_PROGRESS", "DELETE_SUCCESSFUL", "DELETE_FAILED", - "DELETE_IN_PROGRESS" + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" ] }, "MemberAccountStatus":{ @@ -2462,6 +3030,12 @@ }, "exception":true }, + "NoSuchConfigRuleInConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchConfigurationAggregatorException":{ "type":"structure", "members":{ @@ -2474,6 +3048,12 @@ }, "exception":true }, + "NoSuchConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchDeliveryChannelException":{ "type":"structure", "members":{ @@ -2486,6 +3066,12 @@ }, "exception":true }, + "NoSuchOrganizationConformancePackException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, "NoSuchRemediationConfigurationException":{ "type":"structure", "members":{ @@ -2591,6 +3177,84 @@ "type":"list", "member":{"shape":"OrganizationConfigRule"} }, + "OrganizationConformancePack":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "OrganizationConformancePackArn", + "DeliveryS3Bucket", + "LastUpdateTime" + ], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "OrganizationConformancePackArn":{"shape":"StringWithCharLimit256"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "ExcludedAccounts":{"shape":"ExcludedAccounts"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackDetailedStatus":{ + "type":"structure", + "required":[ + "AccountId", + "ConformancePackName", + "Status" + ], + "members":{ + "AccountId":{"shape":"AccountId"}, + "ConformancePackName":{"shape":"StringWithCharLimit256"}, + "Status":{"shape":"OrganizationResourceDetailedStatus"}, + "ErrorCode":{"shape":"String"}, + "ErrorMessage":{"shape":"String"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackDetailedStatuses":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackDetailedStatus"} + }, + 
"OrganizationConformancePackName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z][-a-zA-Z0-9]*" + }, + "OrganizationConformancePackNames":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackName"}, + "max":25, + "min":0 + }, + "OrganizationConformancePackStatus":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "Status" + ], + "members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "Status":{"shape":"OrganizationResourceStatus"}, + "ErrorCode":{"shape":"String"}, + "ErrorMessage":{"shape":"String"}, + "LastUpdateTime":{"shape":"Date"} + } + }, + "OrganizationConformancePackStatuses":{ + "type":"list", + "member":{"shape":"OrganizationConformancePackStatus"} + }, + "OrganizationConformancePackTemplateValidationException":{ + "type":"structure", + "members":{ + }, + "exception":true + }, + "OrganizationConformancePacks":{ + "type":"list", + "member":{"shape":"OrganizationConformancePack"} + }, "OrganizationCustomRuleMetadata":{ "type":"structure", "required":[ @@ -2623,18 +3287,53 @@ "TagValueScope":{"shape":"StringWithCharLimit256"} } }, - "OrganizationRuleStatus":{ + "OrganizationResourceDetailedStatus":{ "type":"string", "enum":[ "CREATE_SUCCESSFUL", "CREATE_IN_PROGRESS", "CREATE_FAILED", + "DELETE_SUCCESSFUL", + "DELETE_FAILED", + "DELETE_IN_PROGRESS", "UPDATE_SUCCESSFUL", - "UPDATE_FAILED", "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "OrganizationResourceDetailedStatusFilters":{ + "type":"structure", + "members":{ + "AccountId":{"shape":"AccountId"}, + "Status":{"shape":"OrganizationResourceDetailedStatus"} + } + }, + "OrganizationResourceStatus":{ + "type":"string", + "enum":[ + "CREATE_SUCCESSFUL", + "CREATE_IN_PROGRESS", + "CREATE_FAILED", "DELETE_SUCCESSFUL", "DELETE_FAILED", - "DELETE_IN_PROGRESS" + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" + ] + }, + "OrganizationRuleStatus":{ + "type":"string", + "enum":[ + "CREATE_SUCCESSFUL", + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "DELETE_SUCCESSFUL", + "DELETE_FAILED", + "DELETE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED" ] }, "OversizedConfigurationItemException":{ @@ -2650,6 +3349,21 @@ "AWS" ] }, + "PageSizeLimit":{ + "type":"integer", + "max":20, + "min":0 + }, + "ParameterName":{ + "type":"string", + "max":255, + "min":0 + }, + "ParameterValue":{ + "type":"string", + "max":4096, + "min":0 + }, "PendingAggregationRequest":{ "type":"structure", "members":{ @@ -2716,6 +3430,27 @@ "ConfigurationRecorder":{"shape":"ConfigurationRecorder"} } }, + "PutConformancePackRequest":{ + "type":"structure", + "required":[ + "ConformancePackName", + "DeliveryS3Bucket" + ], + "members":{ + "ConformancePackName":{"shape":"ConformancePackName"}, + "TemplateS3Uri":{"shape":"TemplateS3Uri"}, + "TemplateBody":{"shape":"TemplateBody"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"} + } + }, + "PutConformancePackResponse":{ + "type":"structure", + "members":{ + "ConformancePackArn":{"shape":"ConformancePackArn"} + } + }, "PutDeliveryChannelRequest":{ "type":"structure", "required":["DeliveryChannel"], @@ -2754,6 +3489,28 @@ "OrganizationConfigRuleArn":{"shape":"StringWithCharLimit256"} } }, + "PutOrganizationConformancePackRequest":{ + "type":"structure", + "required":[ + "OrganizationConformancePackName", + "DeliveryS3Bucket" + ], + 
"members":{ + "OrganizationConformancePackName":{"shape":"OrganizationConformancePackName"}, + "TemplateS3Uri":{"shape":"TemplateS3Uri"}, + "TemplateBody":{"shape":"TemplateBody"}, + "DeliveryS3Bucket":{"shape":"DeliveryS3Bucket"}, + "DeliveryS3KeyPrefix":{"shape":"DeliveryS3KeyPrefix"}, + "ConformancePackInputParameters":{"shape":"ConformancePackInputParameters"}, + "ExcludedAccounts":{"shape":"ExcludedAccounts"} + } + }, + "PutOrganizationConformancePackResponse":{ + "type":"structure", + "members":{ + "OrganizationConformancePackArn":{"shape":"StringWithCharLimit256"} + } + }, "PutRemediationConfigurationsRequest":{ "type":"structure", "required":["RemediationConfigurations"], @@ -3306,6 +4063,11 @@ "ErrorPercentage":{"shape":"Percentage"} } }, + "StackArn":{ + "type":"string", + "max":2048, + "min":1 + }, "StartConfigRulesEvaluationRequest":{ "type":"structure", "members":{ @@ -3463,6 +4225,17 @@ "max":50, "min":0 }, + "TemplateBody":{ + "type":"string", + "max":51200, + "min":1 + }, + "TemplateS3Uri":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"s3://.*" + }, "TooManyTagsException":{ "type":"structure", "members":{ diff --git a/models/apis/config/2014-11-12/docs-2.json b/models/apis/config/2014-11-12/docs-2.json index 74426c39318..8017af42247 100644 --- a/models/apis/config/2014-11-12/docs-2.json +++ b/models/apis/config/2014-11-12/docs-2.json @@ -8,9 +8,11 @@ "DeleteConfigRule": "

Deletes the specified AWS Config rule and all of its evaluation results.

AWS Config sets the state of a rule to DELETING until the deletion is complete. You cannot update a rule while it is in this state. If you make a PutConfigRule or DeleteConfigRule request for the rule, you will receive a ResourceInUseException.

You can check the state of a rule by using the DescribeConfigRules request.

", "DeleteConfigurationAggregator": "

Deletes the specified configuration aggregator and the aggregated data associated with the aggregator.

", "DeleteConfigurationRecorder": "

Deletes the configuration recorder.

After the configuration recorder is deleted, AWS Config will not record resource configuration changes until you create a new configuration recorder.

This action does not delete the configuration information that was previously recorded. You will be able to access the previously recorded information by using the GetResourceConfigHistory action, but you will not be able to access this information in the AWS Config console until you create a new configuration recorder.

", + "DeleteConformancePack": "

Deletes the specified conformance pack and all the AWS Config rules and all evaluation results within that conformance pack.

AWS Config sets the conformance pack to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a conformance pack while it is in this state.
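Illustrative only, not part of the generated model files: a minimal Go sketch of invoking this operation through the SDK's configservice client, assuming the v0.x request/Send calling convention used elsewhere in this SDK at this release; the pack name is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	// Load region and credentials from the environment / shared config files.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Delete a conformance pack by name ("demo-pack" is a placeholder).
	req := svc.DeleteConformancePackRequest(&configservice.DeleteConformancePackInput{
		ConformancePackName: aws.String("demo-pack"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}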

", "DeleteDeliveryChannel": "

Deletes the delivery channel.

Before you can delete the delivery channel, you must stop the configuration recorder by using the StopConfigurationRecorder action.

", "DeleteEvaluationResults": "

Deletes the evaluation results for the specified AWS Config rule. You can specify one AWS Config rule per request. After you delete the evaluation results, you can call the StartConfigRulesEvaluation API to start evaluating your AWS resources against the rule.

", "DeleteOrganizationConfigRule": "

Deletes the specified organization config rule and all of its evaluation results from all member accounts in that organization. Only a master account can delete an organization config rule.

AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a rule while it is in this state.

", + "DeleteOrganizationConformancePack": "

Deletes the specified organization conformance pack and all of the config rules and remediation actions from all member accounts in that organization. Only a master account can delete an organization conformance pack.

AWS Config sets the state of a conformance pack to DELETE_IN_PROGRESS until the deletion is complete. You cannot update a conformance pack while it is in this state.

", "DeletePendingAggregationRequest": "

Deletes pending authorization requests for a specified aggregator account in a specified region.

", "DeleteRemediationConfiguration": "

Deletes the remediation configuration.

", "DeleteRemediationExceptions": "

Deletes one or more remediation exceptions mentioned in the resource keys.

", @@ -26,10 +28,15 @@ "DescribeConfigurationAggregators": "

Returns the details of one or more configuration aggregators. If the configuration aggregator is not specified, this action returns the details for all the configuration aggregators associated with the account.

", "DescribeConfigurationRecorderStatus": "

Returns the current status of the specified configuration recorder. If a configuration recorder is not specified, this action returns the status of all configuration recorders associated with the account.

Currently, you can specify only one configuration recorder per region in your account.

", "DescribeConfigurationRecorders": "

Returns the details for the specified configuration recorders. If the configuration recorder is not specified, this action returns the details for all configuration recorders associated with the account.

Currently, you can specify only one configuration recorder per region in your account.

", + "DescribeConformancePackCompliance": "

Returns compliance information for each rule in that conformance pack.

You must provide exact rule names; otherwise, AWS Config cannot return evaluation results due to insufficient data.
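Illustrative only, not part of the generated model files: a sketch of querying per-rule compliance for a pack, assuming the configservice client and the v0.x request/Send convention; the pack and rule names are placeholders and must match the names in the deployed pack exactly.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Rule names in the filter must be the exact rule names within the pack.
	req := svc.DescribeConformancePackComplianceRequest(&configservice.DescribeConformancePackComplianceInput{
		ConformancePackName: aws.String("demo-pack"),
		Filters: &configservice.ConformancePackComplianceFilters{
			ConfigRuleNames: []string{"demo-rule-1", "demo-rule-2"},
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, rc := range resp.ConformancePackRuleComplianceList {
		if rc.ConfigRuleName != nil {
			fmt.Println(*rc.ConfigRuleName, rc.ComplianceType)
		}
	}
}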

", + "DescribeConformancePackStatus": "

Provides the deployment status of one or more conformance packs.

", + "DescribeConformancePacks": "

Returns a list of one or more conformance packs.
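Illustrative only, not part of the generated model files: a sketch of listing all conformance packs with Limit/NextToken pagination, assuming the configservice client and the v0.x request/Send convention.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Page through all conformance packs, 20 at a time (the PageSizeLimit maximum).
	var next *string
	for {
		req := svc.DescribeConformancePacksRequest(&configservice.DescribeConformancePacksInput{
			Limit:     aws.Int64(20),
			NextToken: next,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, cp := range resp.ConformancePackDetails {
			if cp.ConformancePackName != nil && cp.ConformancePackArn != nil {
				fmt.Println(*cp.ConformancePackName, *cp.ConformancePackArn)
			}
		}
		if resp.NextToken == nil {
			break
		}
		next = resp.NextToken
	}
}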

", "DescribeDeliveryChannelStatus": "

Returns the current status of the specified delivery channel. If a delivery channel is not specified, this action returns the current status of all delivery channels associated with the account.

Currently, you can specify only one delivery channel per region in your account.

", "DescribeDeliveryChannels": "

Returns details about the specified delivery channel. If a delivery channel is not specified, this action returns the details of all delivery channels associated with the account.

Currently, you can specify only one delivery channel per region in your account.

", "DescribeOrganizationConfigRuleStatuses": "

Provides organization config rule deployment status for an organization.

The status is not considered successful until organization config rule is successfully deployed in all the member accounts with an exception of excluded accounts.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names. It is only applicable, when you request all the organization config rules.

Only a master account can call this API.

", "DescribeOrganizationConfigRules": "

Returns a list of organization config rules.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization config rule names. It is only applicable, when you request all the organization config rules.

Only a master account can call this API.

", + "DescribeOrganizationConformancePackStatuses": "

Provides organization conformance pack deployment status for an organization.

The status is not considered successful until the organization conformance pack is successfully deployed in all the member accounts, with the exception of excluded accounts.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization conformance pack names. They are only applicable when you request all the organization conformance packs.

Only a master account can call this API.

", + "DescribeOrganizationConformancePacks": "

Returns a list of organization conformance packs.

When you specify the limit and the next token, you receive a paginated response. Limit and next token are not applicable if you specify organization conformance pack names. They are only applicable when you request all the organization conformance packs. Only a master account can call this API.

", "DescribePendingAggregationRequests": "

Returns a list of all pending aggregation requests.

", "DescribeRemediationConfigurations": "

Returns the details of one or more remediation configurations.

", "DescribeRemediationExceptions": "

Returns the details of one or more remediation exceptions. A detailed view of a remediation exception for a set of resources that includes an explanation of an exception and the time when the exception will be deleted. When you specify the limit and the next token, you receive a paginated response.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you request resources in batch. It is only applicable, when you request all resources.

", @@ -43,8 +50,11 @@ "GetComplianceDetailsByResource": "

Returns the evaluation results for the specified AWS resource. The results indicate which AWS Config rules were used to evaluate the resource, when each rule was last used, and whether the resource complies with each rule.

", "GetComplianceSummaryByConfigRule": "

Returns the number of AWS Config rules that are compliant and noncompliant, up to a maximum of 25 for each.

", "GetComplianceSummaryByResourceType": "

Returns the number of resources that are compliant and the number that are noncompliant. You can specify one or more resource types to get these numbers for each resource type. The maximum number returned is 100.

", + "GetConformancePackComplianceDetails": "

Returns compliance details of a conformance pack for all AWS resources that are monitored by the conformance pack.
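Illustrative only, not part of the generated model files: a sketch of fetching resource-level evaluation results for a pack, filtered to non-compliant results, assuming the configservice client and the v0.x request/Send convention; the pack name is a placeholder and the enum value is passed as a plain string conversion rather than relying on a generated constant name.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Request only the NON_COMPLIANT evaluation results for the pack.
	req := svc.GetConformancePackComplianceDetailsRequest(&configservice.GetConformancePackComplianceDetailsInput{
		ConformancePackName: aws.String("demo-pack"),
		Filters: &configservice.ConformancePackEvaluationFilters{
			ComplianceType: configservice.ConformancePackComplianceType("NON_COMPLIANT"),
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range resp.ConformancePackRuleEvaluationResults {
		annotation := ""
		if r.Annotation != nil {
			annotation = *r.Annotation
		}
		fmt.Println(r.ComplianceType, annotation)
	}
}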

", + "GetConformancePackComplianceSummary": null, "GetDiscoveredResourceCounts": "

Returns the resource types, the number of each resource type, and the total number of resources that AWS Config is recording in this region for your AWS account.

Example

  1. AWS Config is recording three resource types in the US East (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets.

  2. You make a call to the GetDiscoveredResourceCounts action and specify that you want all resource types.

  3. AWS Config returns the following:

The response is paginated. By default, AWS Config lists 100 ResourceCount objects on each page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

If you make a call to the GetDiscoveredResourceCounts action, you might not immediately receive resource counts in the following situations:

It might take a few minutes for AWS Config to record and count your resources. Wait a few minutes and then retry the GetDiscoveredResourceCounts action.

", "GetOrganizationConfigRuleDetailedStatus": "

Returns detailed status for each member account within an organization for a given organization config rule.

Only a master account can call this API.

", + "GetOrganizationConformancePackDetailedStatus": "

Returns detailed status for each member account within an organization for a given organization conformance pack.

Only a master account can call this API.

", "GetResourceConfigHistory": "

Returns a list of configuration items for the specified resource. The list contains details about each state of the resource during the specified time interval. If you specified a retention period to retain your ConfigurationItems between a minimum of 30 days and a maximum of 7 years (2557 days), AWS Config returns the ConfigurationItems for the specified retention period.

The response is paginated. By default, AWS Config returns a limit of 10 configuration items per page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

Each call to the API is limited to span a duration of seven days. It is likely that the number of records returned is smaller than the specified limit. In such cases, you can make another call, using the nextToken.

", "ListAggregateDiscoveredResources": "

Accepts a resource type and returns a list of resource identifiers that are aggregated for a specific resource type across accounts and regions. A resource identifier includes the resource type, ID, (if available) the custom resource name, source account, and source region. You can narrow the results to include only resources that have specific resource IDs, or a resource name, or source account ID, or source region.

For example, if the input consists of accountID 12345678910 and the region is us-east-1 for resource type AWS::EC2::Instance then the API returns all the EC2 instance identifiers of accountID 12345678910 and region us-east-1.

", "ListDiscoveredResources": "

Accepts a resource type and returns a list of resource identifiers for the resources of that type. A resource identifier includes the resource type, ID, and (if available) the custom resource name. The results consist of resources that AWS Config has discovered, including those that AWS Config is not currently recording. You can narrow the results to include only resources that have specific resource IDs or a resource name.

You can specify either resource IDs or a resource name, but not both, in the same request.

The response is paginated. By default, AWS Config lists 100 resource identifiers on each page. You can customize this number with the limit parameter. The response includes a nextToken string. To get the next page of results, run the request again and specify the string for the nextToken parameter.

", @@ -53,9 +63,11 @@ "PutConfigRule": "

Adds or updates an AWS Config rule for evaluating whether your AWS resources comply with your desired configurations.

You can use this action for custom AWS Config rules and AWS managed Config rules. A custom AWS Config rule is a rule that you develop and maintain. An AWS managed Config rule is a customizable, predefined rule that AWS Config provides.

If you are adding a new custom AWS Config rule, you must first create the AWS Lambda function that the rule invokes to evaluate your resources. When you use the PutConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. Specify the ARN for the SourceIdentifier key. This key is part of the Source object, which is part of the ConfigRule object.

If you are adding an AWS managed Config rule, specify the rule's identifier for the SourceIdentifier key. To reference AWS managed Config rule identifiers, see About AWS Managed Config Rules.

For any new rule that you add, specify the ConfigRuleName in the ConfigRule object. Do not specify the ConfigRuleArn or the ConfigRuleId. These values are generated by AWS Config for new rules.

If you are updating a rule that you added previously, you can specify the rule by ConfigRuleName, ConfigRuleId, or ConfigRuleArn in the ConfigRule data type that you use in this request.

The maximum number of rules that AWS Config supports is 150.

For information about requesting a rule limit increase, see AWS Config Limits in the AWS General Reference Guide.

For more information about developing and using AWS Config rules, see Evaluating AWS Resource Configurations with AWS Config in the AWS Config Developer Guide.

", "PutConfigurationAggregator": "

Creates and updates the configuration aggregator with the selected source accounts and regions. The source account can be individual account(s) or an organization.

AWS Config should be enabled in source accounts and regions you want to aggregate.

If your source type is an organization, you must be signed in to the master account and all features must be enabled in your organization. AWS Config calls EnableAwsServiceAccess API to enable integration between AWS Config and AWS Organizations.

", "PutConfigurationRecorder": "

Creates a new configuration recorder to record the selected resource configurations.

You can use this action to change the role roleARN or the recordingGroup of an existing recorder. To change the role, call the action on the existing configuration recorder and specify a role.

Currently, you can specify only one configuration recorder per region in your account.

If ConfigurationRecorder does not have the recordingGroup parameter specified, the default is to record all supported resource types.

", + "PutConformancePack": "

Creates or updates a conformance pack. A conformance pack is a collection of AWS Config rules that can be easily deployed in an account and a region.

This API creates a service-linked role AWSServiceRoleForConfigConforms in your account. The service-linked role is created only when the role does not exist in your account. AWS Config verifies the existence of the role with the GetRole action.

You must specify either the TemplateS3Uri or the TemplateBody parameter, but not both. If you provide both, AWS Config uses the TemplateS3Uri parameter and ignores the TemplateBody parameter.
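Illustrative only, not part of the generated model files: a sketch of deploying a conformance pack from an S3 template, assuming the configservice client and the v0.x request/Send convention; the pack name, bucket names, key prefix, and parameter values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Provide either TemplateS3Uri or TemplateBody, not both.
	req := svc.PutConformancePackRequest(&configservice.PutConformancePackInput{
		ConformancePackName: aws.String("demo-pack"),
		TemplateS3Uri:       aws.String("s3://demo-bucket/conformance-pack.yaml"),
		DeliveryS3Bucket:    aws.String("demo-config-delivery-bucket"),
		DeliveryS3KeyPrefix: aws.String("conformance-packs"),
		ConformancePackInputParameters: []configservice.ConformancePackInputParameter{
			{ParameterName: aws.String("DemoParameter"), ParameterValue: aws.String("demo-value")},
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.ConformancePackArn != nil {
		fmt.Println("conformance pack ARN:", *resp.ConformancePackArn)
	}
}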

", "PutDeliveryChannel": "

Creates a delivery channel object to deliver configuration information to an Amazon S3 bucket and Amazon SNS topic.

Before you can create a delivery channel, you must create a configuration recorder.

You can use this action to change the Amazon S3 bucket or an Amazon SNS topic of the existing delivery channel. To change the Amazon S3 bucket or an Amazon SNS topic, call this action and specify the changed values for the S3 bucket and the SNS topic. If you specify a different value for either the S3 bucket or the SNS topic, this action will keep the existing value for the parameter that is not changed.

You can have only one delivery channel per region in your account.

", "PutEvaluations": "

Used by an AWS Lambda function to deliver evaluation results to AWS Config. This action is required in every AWS Lambda function that is invoked by an AWS Config rule.

", "PutOrganizationConfigRule": "

Adds or updates organization config rule for your entire organization evaluating whether your AWS resources comply with your desired configurations. Only a master account can create or update an organization config rule.

This API enables organization service access through the EnableAWSServiceAccess action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup in the master account of your organization. The service linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of role with GetRole action.

You can use this action to create both custom AWS Config rules and AWS managed Config rules. If you are adding a new custom AWS Config rule, you must first create AWS Lambda function in the master account that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule action to add the rule to AWS Config, you must specify the Amazon Resource Name (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed Config rule, specify the rule's identifier for the RuleIdentifier key.

The maximum number of organization config rules that AWS Config supports is 150.

Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata.

", + "PutOrganizationConformancePack": "

Deploys conformance packs across member accounts in an AWS Organization.

This API enables organization service access through the EnableAWSServiceAccess action and creates a service-linked role AWSServiceRoleForConfigMultiAccountSetup in the master account of your organization. The service-linked role is created only when the role does not exist in the master account. AWS Config verifies the existence of the role with the GetRole action.

The SPN is config-multiaccountsetup.amazonaws.com.

You must specify either the TemplateS3Uri or the TemplateBody parameter, but not both. If you provide both, AWS Config uses the TemplateS3Uri parameter and ignores the TemplateBody parameter.
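Illustrative only, not part of the generated model files: a sketch of deploying an organization conformance pack from the master account, assuming the configservice client and the v0.x request/Send convention; the pack name, bucket name, and excluded account ID are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	// Must be called from the organization master account.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	req := svc.PutOrganizationConformancePackRequest(&configservice.PutOrganizationConformancePackInput{
		OrganizationConformancePackName: aws.String("demo-org-pack"),
		TemplateS3Uri:                   aws.String("s3://demo-bucket/conformance-pack.yaml"),
		DeliveryS3Bucket:                aws.String("demo-config-delivery-bucket"),
		// Accounts to skip when deploying the pack (placeholder account ID).
		ExcludedAccounts: []string{"111111111111"},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.OrganizationConformancePackArn != nil {
		fmt.Println("organization conformance pack ARN:", *resp.OrganizationConformancePackArn)
	}
}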

", "PutRemediationConfigurations": "

Adds or updates the remediation configuration with a specific AWS Config rule with the selected target or action. The API creates the RemediationConfiguration object for the AWS Config rule. The AWS Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.

", "PutRemediationExceptions": "

A remediation exception is when a specific resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specific resource with a specific AWS Config rule.

", "PutRetentionConfiguration": "

Creates and updates the retention configuration with details about retention period (number of days) that AWS Config stores your historical information. The API creates the RetentionConfiguration object and names the object as default. When you have a RetentionConfiguration object named default, calling the API modifies the default object.

Currently, AWS Config supports only one retention configuration per region in your account.

", @@ -111,6 +123,8 @@ "ExcludedAccounts$member": null, "GetAggregateComplianceDetailsByConfigRuleRequest$AccountId": "

The 12-digit account ID of the source account.

", "MemberAccountStatus$AccountId": "

The 12-digit account ID of a member account.

", + "OrganizationConformancePackDetailedStatus$AccountId": "

The 12-digit account ID of a member account.

", + "OrganizationResourceDetailedStatusFilters$AccountId": "

The 12-digit account ID of the member account within an organization.

", "PendingAggregationRequest$RequesterAccountId": "

The 12-digit account ID of the account requesting to aggregate data.

", "PutAggregationAuthorizationRequest$AuthorizedAccountId": "

The 12-digit account ID of the account authorized to aggregate data.

", "ResourceCountFilters$AccountId": "

The 12-digit ID of the account.

", @@ -228,6 +242,12 @@ "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are ConfigRule, ConfigurationAggregator and AggregatorAuthorization.

" } }, + "Annotation": { + "base": null, + "refs": { + "ConformancePackEvaluationResult$Annotation": "

Supplementary information about how the evaluation determined the compliance.

" + } + }, "AutoRemediationAttemptSeconds": { "base": null, "refs": { @@ -477,6 +497,7 @@ "AggregateComplianceByConfigRule$ConfigRuleName": "

The name of the AWS Config rule.

", "ConfigRuleComplianceFilters$ConfigRuleName": "

The name of the AWS Config rule.

", "ConfigRuleNames$member": null, + "ConformancePackRuleCompliance$ConfigRuleName": "

Filters the results by AWS Config rule name.

", "DeleteRemediationConfigurationRequest$ConfigRuleName": "

The name of the AWS Config rule for which you want to delete remediation configuration.

", "DeleteRemediationExceptionsRequest$ConfigRuleName": "

The name of the AWS Config rule for which you want to delete remediation exception configuration.

", "DescribeRemediationExceptionsRequest$ConfigRuleName": "

The name of the AWS Config rule.

", @@ -642,12 +663,186 @@ "ConfigurationItem$configurationStateId": "

An identifier that indicates the ordering of the configuration items of a resource.

" } }, + "ConformancePackArn": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackArn": "

Amazon Resource Name (ARN) of the conformance pack.

", + "ConformancePackStatusDetail$ConformancePackArn": "

Amazon Resource Name (ARN) of the conformance pack.

", + "PutConformancePackResponse$ConformancePackArn": "

ARN of the conformance pack.

" + } + }, + "ConformancePackComplianceFilters": { + "base": "

Filters the conformance pack by compliance types and AWS Config rule names.

", + "refs": { + "DescribeConformancePackComplianceRequest$Filters": "

A ConformancePackComplianceFilters object.

" + } + }, + "ConformancePackComplianceResourceIds": { + "base": null, + "refs": { + "ConformancePackEvaluationFilters$ResourceIds": "

Filters the results by resource IDs.

" + } + }, + "ConformancePackComplianceSummary": { + "base": null, + "refs": { + "ConformancePackComplianceSummaryList$member": null + } + }, + "ConformancePackComplianceSummaryList": { + "base": null, + "refs": { + "GetConformancePackComplianceSummaryResponse$ConformancePackComplianceSummaryList": null + } + }, + "ConformancePackComplianceType": { + "base": null, + "refs": { + "ConformancePackComplianceFilters$ComplianceType": "

Filters the results by compliance.

The allowed values are COMPLIANT and NON_COMPLIANT.

", + "ConformancePackComplianceSummary$ConformancePackComplianceStatus": null, + "ConformancePackEvaluationFilters$ComplianceType": "

Filters the results by compliance.

The allowed values are COMPLIANT and NON_COMPLIANT.

", + "ConformancePackEvaluationResult$ComplianceType": "

Filters the results by compliance.

The allowed values are COMPLIANT and NON_COMPLIANT.

", + "ConformancePackRuleCompliance$ComplianceType": "

Filters the results by compliance.

The allowed values are COMPLIANT and NON_COMPLIANT.

" + } + }, + "ConformancePackConfigRuleNames": { + "base": null, + "refs": { + "ConformancePackComplianceFilters$ConfigRuleNames": "

Filters the results by AWS Config rule names.

", + "ConformancePackEvaluationFilters$ConfigRuleNames": "

Filters the results by AWS Config rule names.

" + } + }, + "ConformancePackDetail": { + "base": "

Returns details of a conformance pack. A conformance pack is a collection of AWS Config rules that can be easily deployed in an account and a region.

", + "refs": { + "ConformancePackDetailList$member": null + } + }, + "ConformancePackDetailList": { + "base": null, + "refs": { + "DescribeConformancePacksResponse$ConformancePackDetails": "

Returns a list of ConformancePackDetail objects.

" + } + }, + "ConformancePackEvaluationFilters": { + "base": "

Filters a conformance pack by AWS Config rule names, compliance types, AWS resource types, and resource IDs.

", + "refs": { + "GetConformancePackComplianceDetailsRequest$Filters": "

A ConformancePackEvaluationFilters object.

" + } + }, + "ConformancePackEvaluationResult": { + "base": "

The details of a conformance pack evaluation. Provides AWS Config rule and AWS resource type that was evaluated, the compliance of the conformance pack, related time stamps, and supplementary information.

", + "refs": { + "ConformancePackRuleEvaluationResultsList$member": null + } + }, + "ConformancePackId": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackId": "

ID of the conformance pack.

", + "ConformancePackStatusDetail$ConformancePackId": "

ID of the conformance pack.

" + } + }, + "ConformancePackInputParameter": { + "base": "

Input parameters in the form of key-value pairs for the conformance pack, both of which you define. Keys can have a maximum character length of 128 characters, and values can have a maximum length of 256 characters.

", + "refs": { + "ConformancePackInputParameters$member": null + } + }, + "ConformancePackInputParameters": { + "base": null, + "refs": { + "ConformancePackDetail$ConformancePackInputParameters": "

A list of ConformancePackInputParameter objects.

", + "OrganizationConformancePack$ConformancePackInputParameters": "

A list of ConformancePackInputParameter objects.

", + "PutConformancePackRequest$ConformancePackInputParameters": "

A list of ConformancePackInputParameter objects.

", + "PutOrganizationConformancePackRequest$ConformancePackInputParameters": "

A list of ConformancePackInputParameter objects.

" + } + }, + "ConformancePackName": { + "base": null, + "refs": { + "ConformancePackComplianceSummary$ConformancePackName": null, + "ConformancePackDetail$ConformancePackName": "

Name of the conformance pack.

", + "ConformancePackNamesList$member": null, + "ConformancePackNamesToSummarizeList$member": null, + "ConformancePackStatusDetail$ConformancePackName": "

Name of the conformance pack.

", + "DeleteConformancePackRequest$ConformancePackName": "

Name of the conformance pack you want to delete.

", + "DescribeConformancePackComplianceRequest$ConformancePackName": "

Name of the conformance pack.

", + "DescribeConformancePackComplianceResponse$ConformancePackName": "

Name of the conformance pack.

", + "GetConformancePackComplianceDetailsRequest$ConformancePackName": "

Name of the conformance pack.

", + "GetConformancePackComplianceDetailsResponse$ConformancePackName": "

Name of the conformance pack.

", + "PutConformancePackRequest$ConformancePackName": "

Name of the conformance pack you want to create.

" + } + }, + "ConformancePackNamesList": { + "base": null, + "refs": { + "DescribeConformancePackStatusRequest$ConformancePackNames": "

Comma-separated list of conformance pack names.

", + "DescribeConformancePacksRequest$ConformancePackNames": "

Comma-separated list of conformance pack names for which you want details. If you do not specify any names, AWS Config returns details for all your conformance packs.

" + } + }, + "ConformancePackNamesToSummarizeList": { + "base": null, + "refs": { + "GetConformancePackComplianceSummaryRequest$ConformancePackNames": null + } + }, + "ConformancePackRuleCompliance": { + "base": "

Compliance information of one or more AWS Config rules within a conformance pack. You can filter using AWS Config rule names and compliance types.

", + "refs": { + "ConformancePackRuleComplianceList$member": null + } + }, + "ConformancePackRuleComplianceList": { + "base": null, + "refs": { + "DescribeConformancePackComplianceResponse$ConformancePackRuleComplianceList": "

Returns a list of ConformancePackRuleCompliance objects.

" + } + }, + "ConformancePackRuleEvaluationResultsList": { + "base": null, + "refs": { + "GetConformancePackComplianceDetailsResponse$ConformancePackRuleEvaluationResults": "

Returns a list of ConformancePackEvaluationResult objects.

" + } + }, + "ConformancePackState": { + "base": null, + "refs": { + "ConformancePackStatusDetail$ConformancePackState": "

Indicates the deployment status of the conformance pack.

AWS Config sets the state of the conformance pack to:

" + } + }, + "ConformancePackStatusDetail": { + "base": "

Status details of a conformance pack.

", + "refs": { + "ConformancePackStatusDetailsList$member": null + } + }, + "ConformancePackStatusDetailsList": { + "base": null, + "refs": { + "DescribeConformancePackStatusResponse$ConformancePackStatusDetails": "

A list of ConformancePackStatusDetail objects.

" + } + }, + "ConformancePackStatusReason": { + "base": null, + "refs": { + "ConformancePackStatusDetail$ConformancePackStatusReason": "

The reason for the conformance pack creation failure.

" + } + }, + "ConformancePackTemplateValidationException": { + "base": "

You have specified a template that is not valid or supported.

", + "refs": { + } + }, "CosmosPageLimit": { "base": null, "refs": { "DescribeOrganizationConfigRuleStatusesRequest$Limit": "

The maximum number of OrganizationConfigRuleStatuses returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

", "DescribeOrganizationConfigRulesRequest$Limit": "

The maximum number of organization config rules returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

", - "GetOrganizationConfigRuleDetailedStatusRequest$Limit": "

The maximum number of OrganizationConfigRuleDetailedStatus returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" + "DescribeOrganizationConformancePackStatusesRequest$Limit": "

The maximum number of OrganizationConformancePackStatuses returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

", + "DescribeOrganizationConformancePacksRequest$Limit": "

The maximum number of organization conformance packs returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

", + "GetOrganizationConfigRuleDetailedStatusRequest$Limit": "

The maximum number of OrganizationConfigRuleDetailedStatus returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

", + "GetOrganizationConformancePackDetailedStatusRequest$Limit": "

The maximum number of OrganizationConformancePackDetailedStatuses returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" } }, "Date": { @@ -672,12 +867,20 @@ "ConfigurationRecorderStatus$lastStartTime": "

The time the recorder was last started.

", "ConfigurationRecorderStatus$lastStopTime": "

The time the recorder was last stopped.

", "ConfigurationRecorderStatus$lastStatusChangeTime": "

The time when the status was last changed.

", + "ConformancePackDetail$LastUpdateRequestedTime": "

The last time the conformance pack update was requested.

", + "ConformancePackEvaluationResult$ConfigRuleInvokedTime": "

The time when the AWS Config rule evaluated the AWS resource.

", + "ConformancePackEvaluationResult$ResultRecordedTime": "

The time when AWS Config recorded the evaluation result.

", + "ConformancePackStatusDetail$LastUpdateRequestedTime": "

The last time the conformance pack creation or update was requested.

", + "ConformancePackStatusDetail$LastUpdateCompletedTime": "

The last time the conformance pack creation or update was successful.

", "EvaluationResult$ResultRecordedTime": "

The time when AWS Config recorded the evaluation result.

", "EvaluationResult$ConfigRuleInvokedTime": "

The time when the AWS Config rule evaluated the AWS resource.

", "EvaluationResultIdentifier$OrderingTimestamp": "

The time of the event that triggered the evaluation of your AWS resources. The time can indicate when AWS Config delivered a configuration item change notification, or it can indicate when AWS Config delivered the configuration snapshot, depending on which event triggered the evaluation.

", "MemberAccountStatus$LastUpdateTime": "

The timestamp of the last status update.

", "OrganizationConfigRule$LastUpdateTime": "

The timestamp of the last update.

", "OrganizationConfigRuleStatus$LastUpdateTime": "

The timestamp of the last update.

", + "OrganizationConformancePack$LastUpdateTime": "

The last time the organization conformance pack was updated.

", + "OrganizationConformancePackDetailedStatus$LastUpdateTime": "

The timestamp of the last status update.

", + "OrganizationConformancePackStatus$LastUpdateTime": "

The timestamp of the last update.

", "PutRemediationExceptionsRequest$ExpirationTime": "

The exception is automatically deleted after the expiration date.

", "RemediationException$ExpirationTime": "

The time when the remediation exception will be deleted.

", "RemediationExecutionStatus$InvocationTime": "

Start time when the remediation was executed.

", @@ -706,6 +909,11 @@ "refs": { } }, + "DeleteConformancePackRequest": { + "base": null, + "refs": { + } + }, "DeleteDeliveryChannelRequest": { "base": "

The input for the DeleteDeliveryChannel action. The action accepts the following data, in JSON format.

", "refs": { @@ -726,6 +934,11 @@ "refs": { } }, + "DeleteOrganizationConformancePackRequest": { + "base": null, + "refs": { + } + }, "DeletePendingAggregationRequestRequest": { "base": null, "refs": { @@ -798,6 +1011,24 @@ "DescribeDeliveryChannelStatusResponse$DeliveryChannelsStatus": "

A list that contains the status of a specified delivery channel.

" } }, + "DeliveryS3Bucket": { + "base": null, + "refs": { + "ConformancePackDetail$DeliveryS3Bucket": "

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results and conformance pack template that is used to create a pack.

", + "OrganizationConformancePack$DeliveryS3Bucket": "

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results and conformance pack template that is used to create a pack.

", + "PutConformancePackRequest$DeliveryS3Bucket": "

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. AWS Config stores intermediate files while processing conformance pack template.

", + "PutOrganizationConformancePackRequest$DeliveryS3Bucket": "

Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. AWS Config stores intermediate files while processing conformance pack template.

" + } + }, + "DeliveryS3KeyPrefix": { + "base": null, + "refs": { + "ConformancePackDetail$DeliveryS3KeyPrefix": "

Any folder structure you want to add to an Amazon S3 bucket.

", + "OrganizationConformancePack$DeliveryS3KeyPrefix": "

Any folder structure you want to add to an Amazon S3 bucket.

", + "PutConformancePackRequest$DeliveryS3KeyPrefix": "

The prefix for the Amazon S3 bucket.

", + "PutOrganizationConformancePackRequest$DeliveryS3KeyPrefix": "

The prefix for the Amazon S3 bucket.

" + } + }, "DeliveryStatus": { "base": null, "refs": { @@ -905,6 +1136,42 @@ "refs": { } }, + "DescribeConformancePackComplianceLimit": { + "base": null, + "refs": { + "DescribeConformancePackComplianceRequest$Limit": "

The maximum number of AWS Config rules within a conformance pack returned on each page.

" + } + }, + "DescribeConformancePackComplianceRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackComplianceResponse": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackStatusRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePackStatusResponse": { + "base": null, + "refs": { + } + }, + "DescribeConformancePacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeConformancePacksResponse": { + "base": null, + "refs": { + } + }, "DescribeDeliveryChannelStatusRequest": { "base": "

The input for the DeliveryChannelStatus action.

", "refs": { @@ -945,6 +1212,26 @@ "refs": { } }, + "DescribeOrganizationConformancePackStatusesRequest": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePackStatusesResponse": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePacksRequest": { + "base": null, + "refs": { + } + }, + "DescribeOrganizationConformancePacksResponse": { + "base": null, + "refs": { + } + }, "DescribePendingAggregationRequestsLimit": { "base": null, "refs": { @@ -1035,6 +1322,7 @@ "base": "

Uniquely identifies an evaluation result.

", "refs": { "AggregateEvaluationResult$EvaluationResultIdentifier": "

Uniquely identifies the evaluation result.

", + "ConformancePackEvaluationResult$EvaluationResultIdentifier": null, "EvaluationResult$EvaluationResultIdentifier": "

Uniquely identifies the evaluation result.

" } }, @@ -1068,7 +1356,9 @@ "base": null, "refs": { "OrganizationConfigRule$ExcludedAccounts": "

A comma-separated list of accounts excluded from organization config rule.

", - "PutOrganizationConfigRuleRequest$ExcludedAccounts": "

A comma-separated list of accounts that you want to exclude from an organization config rule.

" + "OrganizationConformancePack$ExcludedAccounts": "

A comma-separated list of accounts excluded from organization conformance pack.

", + "PutOrganizationConfigRuleRequest$ExcludedAccounts": "

A comma-separated list of accounts that you want to exclude from an organization config rule.

", + "PutOrganizationConformancePackRequest$ExcludedAccounts": "

A list of AWS accounts to be excluded from an organization conformance pack while deploying a conformance pack.

" } }, "ExecutionControls": { @@ -1212,6 +1502,32 @@ "refs": { } }, + "GetConformancePackComplianceDetailsLimit": { + "base": null, + "refs": { + "GetConformancePackComplianceDetailsRequest$Limit": "

The maximum number of evaluation results returned on each page. If you do not specify a number, AWS Config uses the default. The default is 100.

" + } + }, + "GetConformancePackComplianceDetailsRequest": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceDetailsResponse": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceSummaryRequest": { + "base": null, + "refs": { + } + }, + "GetConformancePackComplianceSummaryResponse": { + "base": null, + "refs": { + } + }, "GetDiscoveredResourceCountsRequest": { "base": null, "refs": { @@ -1232,6 +1548,16 @@ "refs": { } }, + "GetOrganizationConformancePackDetailedStatusRequest": { + "base": null, + "refs": { + } + }, + "GetOrganizationConformancePackDetailedStatusResponse": { + "base": null, + "refs": { + } + }, "GetResourceConfigHistoryRequest": { "base": "

The input for the GetResourceConfigHistory action.

", "refs": { @@ -1274,7 +1600,7 @@ } }, "InsufficientPermissionsException": { - "base": "

Indicates one of the following errors:

", + "base": "

Indicates one of the following errors:

", "refs": { } }, @@ -1428,6 +1754,11 @@ "refs": { } }, + "MaxNumberOfConformancePacksExceededException": { + "base": "

You have reached the limit (20) on the number of conformance packs in an account.

", + "refs": { + } + }, "MaxNumberOfDeliveryChannelsExceededException": { "base": "

You have reached the limit of the number of delivery channels you can create.

", "refs": { @@ -1438,6 +1769,11 @@ "refs": { } }, + "MaxNumberOfOrganizationConformancePacksExceededException": { + "base": "

You have reached the limit (10) on the number of organization conformance packs in an account.

", + "refs": { + } + }, "MaxNumberOfRetentionConfigurationsExceededException": { "base": "

Failed to add the retention configuration because a retention configuration with that name already exists.

", "refs": { @@ -1485,6 +1821,12 @@ "DescribeAggregateComplianceByConfigRulesResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeComplianceByResourceRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeComplianceByResourceResponse$NextToken": "

The string that you use in a subsequent request to get the next page of results in a paginated response.

", + "DescribeConformancePackComplianceRequest$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "DescribeConformancePackComplianceResponse$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "DescribeConformancePackStatusRequest$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "DescribeConformancePackStatusResponse$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "DescribeConformancePacksRequest$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "DescribeConformancePacksResponse$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", "DescribeRetentionConfigurationsRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeRetentionConfigurationsResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "GetAggregateComplianceDetailsByConfigRuleRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", @@ -1495,6 +1837,10 @@ "GetAggregateDiscoveredResourceCountsResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "GetComplianceDetailsByConfigRuleRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "GetComplianceDetailsByConfigRuleResponse$NextToken": "

The string that you use in a subsequent request to get the next page of results in a paginated response.

", + "GetConformancePackComplianceDetailsRequest$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "GetConformancePackComplianceDetailsResponse$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", + "GetConformancePackComplianceSummaryRequest$NextToken": null, + "GetConformancePackComplianceSummaryResponse$NextToken": null, "GetDiscoveredResourceCountsRequest$nextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "GetDiscoveredResourceCountsResponse$nextToken": "

The string that you use in a subsequent request to get the next page of results in a paginated response.

", "GetResourceConfigHistoryRequest$nextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", @@ -1539,6 +1885,11 @@ "refs": { } }, + "NoSuchConfigRuleInConformancePackException": { + "base": "

The AWS Config rule that you passed in the filter does not exist.

", + "refs": { + } + }, "NoSuchConfigurationAggregatorException": { "base": "

You have specified a configuration aggregator that does not exist.

", "refs": { @@ -1549,6 +1900,11 @@ "refs": { } }, + "NoSuchConformancePackException": { + "base": "

You specified one or more conformance packs that do not exist.

", + "refs": { + } + }, "NoSuchDeliveryChannelException": { "base": "

You have specified a delivery channel that does not exist.

", "refs": { @@ -1559,6 +1915,11 @@ "refs": { } }, + "NoSuchOrganizationConformancePackException": { + "base": "

The AWS Config organization conformance pack that you passed in the filter does not exist.

For DeleteOrganizationConformancePack, you tried to delete an organization conformance pack that does not exist.

", + "refs": { + } + }, "NoSuchRemediationConfigurationException": { "base": "

You specified an AWS Config rule without a remediation configuration.

", "refs": { @@ -1581,7 +1942,7 @@ } }, "OrganizationAccessDeniedException": { - "base": "

For PutConfigAggregator API, no permission to call EnableAWSServiceAccess API.

For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs are called from member accounts. All APIs must be called from organization master account.

", + "base": "

For PutConfigAggregator API, no permission to call EnableAWSServiceAccess API.

For all OrganizationConfigRule and OrganizationConformancePack APIs, AWS Config throws an exception if these APIs are called from member accounts. All APIs must be called from the organization master account.

", "refs": { } }, @@ -1653,7 +2014,66 @@ "OrganizationConfigRules": { "base": null, "refs": { - "DescribeOrganizationConfigRulesResponse$OrganizationConfigRules": "

Retuns a list OrganizationConfigRule objects.

" + "DescribeOrganizationConfigRulesResponse$OrganizationConfigRules": "

Returns a list of OrganizationConfigRule objects.

" + } + }, + "OrganizationConformancePack": { + "base": "

An organization conformance pack that has information about conformance packs that AWS Config creates in member accounts.

", + "refs": { + "OrganizationConformancePacks$member": null + } + }, + "OrganizationConformancePackDetailedStatus": { + "base": "

Organization conformance pack creation or deletion status in each member account. This includes the name of the conformance pack, the status, and the error code and error message if the conformance pack creation or deletion failed.

", + "refs": { + "OrganizationConformancePackDetailedStatuses$member": null + } + }, + "OrganizationConformancePackDetailedStatuses": { + "base": null, + "refs": { + "GetOrganizationConformancePackDetailedStatusResponse$OrganizationConformancePackDetailedStatuses": "

A list of OrganizationConformancePackDetailedStatus objects.

" + } + }, + "OrganizationConformancePackName": { + "base": null, + "refs": { + "DeleteOrganizationConformancePackRequest$OrganizationConformancePackName": "

The name of the organization conformance pack that you want to delete.

", + "GetOrganizationConformancePackDetailedStatusRequest$OrganizationConformancePackName": "

The name of the organization conformance pack for which you want status details for member accounts.

", + "OrganizationConformancePack$OrganizationConformancePackName": "

The name you assign to an organization conformance pack.

", + "OrganizationConformancePackNames$member": null, + "OrganizationConformancePackStatus$OrganizationConformancePackName": "

The name that you assign to an organization conformance pack.

", + "PutOrganizationConformancePackRequest$OrganizationConformancePackName": "

The name of the organization conformance pack that you want to create.

" + } + }, + "OrganizationConformancePackNames": { + "base": null, + "refs": { + "DescribeOrganizationConformancePackStatusesRequest$OrganizationConformancePackNames": "

The names of organization conformance packs for which you want status details. If you do not specify any names, AWS Config returns details for all your organization conformance packs.

", + "DescribeOrganizationConformancePacksRequest$OrganizationConformancePackNames": "

The names of the organization conformance packs for which you want details. If you do not specify any names, AWS Config returns details for all your organization conformance packs.

" + } + }, + "OrganizationConformancePackStatus": { + "base": "

Returns the status for an organization conformance pack in an organization.

", + "refs": { + "OrganizationConformancePackStatuses$member": null + } + }, + "OrganizationConformancePackStatuses": { + "base": null, + "refs": { + "DescribeOrganizationConformancePackStatusesResponse$OrganizationConformancePackStatuses": "

A list of OrganizationConformancePackStatus objects.

" + } + }, + "OrganizationConformancePackTemplateValidationException": { + "base": "

You have specified a template that is not valid or supported.

", + "refs": { + } + }, + "OrganizationConformancePacks": { + "base": null, + "refs": { + "DescribeOrganizationConformancePacksResponse$OrganizationConformancePacks": "

Returns a list of OrganizationConformancePack objects.

" } }, "OrganizationCustomRuleMetadata": { @@ -1670,6 +2090,25 @@ "PutOrganizationConfigRuleRequest$OrganizationManagedRuleMetadata": "

An OrganizationManagedRuleMetadata object.

" } }, + "OrganizationResourceDetailedStatus": { + "base": null, + "refs": { + "OrganizationConformancePackDetailedStatus$Status": "

Indicates deployment status for the conformance pack in a member account. When the master account calls the PutOrganizationConformancePack action for the first time, the conformance pack status is created in the member account. When the master account calls the PutOrganizationConformancePack action for the second time, the conformance pack status is updated in the member account. The conformance pack status is deleted when the master account deletes the OrganizationConformancePack and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the conformance pack to:

", + "OrganizationResourceDetailedStatusFilters$Status": "

Indicates deployment status for the conformance pack in a member account. When the master account calls the PutOrganizationConformancePack action for the first time, the conformance pack status is created in the member account. When the master account calls the PutOrganizationConformancePack action for the second time, the conformance pack status is updated in the member account. The conformance pack status is deleted when the master account deletes the OrganizationConformancePack and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the conformance pack to:

" + } + }, + "OrganizationResourceDetailedStatusFilters": { + "base": "

A status filter object used to filter results by a specific member account ID or status type for an organization conformance pack.

", + "refs": { + "GetOrganizationConformancePackDetailedStatusRequest$Filters": "

An OrganizationResourceDetailedStatusFilters object.
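A minimal sketch of the detailed-status call described by these members, again assuming the regenerated `configservice` client; the pack name is a placeholder and the optional `Filters` member (an OrganizationResourceDetailedStatusFilters value) is left unset so statuses for all member accounts are returned:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	req := svc.GetOrganizationConformancePackDetailedStatusRequest(
		&configservice.GetOrganizationConformancePackDetailedStatusInput{
			OrganizationConformancePackName: aws.String("example-org-pack"), // placeholder
		})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, status := range resp.OrganizationConformancePackDetailedStatuses {
		// Each entry carries the member account's conformance pack name,
		// deployment status, and any error code/message.
		fmt.Println(status)
	}
}
```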

" + } + }, + "OrganizationResourceStatus": { + "base": null, + "refs": { + "OrganizationConformancePackStatus$Status": "

Indicates deployment status of an organization conformance pack. When the master account calls PutOrganizationConformancePack for the first time, the conformance pack status is created in all the member accounts. When the master account calls PutOrganizationConformancePack for the second time, the conformance pack status is updated in all the member accounts. Additionally, the conformance pack status is updated when one or more member accounts join or leave an organization. The conformance pack status is deleted when the master account deletes the OrganizationConformancePack in all the member accounts and disables service access for config-multiaccountsetup.amazonaws.com.

AWS Config sets the state of the conformance pack to:

" + } + }, "OrganizationRuleStatus": { "base": null, "refs": { @@ -1687,6 +2126,26 @@ "Source$Owner": "

Indicates whether AWS or the customer owns and manages the AWS Config rule.

" } }, + "PageSizeLimit": { + "base": null, + "refs": { + "DescribeConformancePackStatusRequest$Limit": "

The maximum number of conformance packs returned on each page.

", + "DescribeConformancePacksRequest$Limit": "

The maximum number of conformance packs returned on each page.

", + "GetConformancePackComplianceSummaryRequest$Limit": null + } + }, + "ParameterName": { + "base": null, + "refs": { + "ConformancePackInputParameter$ParameterName": "

One part of a key-value pair: the parameter name (the key).

" + } + }, + "ParameterValue": { + "base": null, + "refs": { + "ConformancePackInputParameter$ParameterValue": "

The other part of the key-value pair: the parameter value.

" + } + }, "PendingAggregationRequest": { "base": "

An object that represents the account ID and region of an aggregator account that is requesting authorization but is not yet authorized.

", "refs": { @@ -1736,6 +2195,16 @@ "refs": { } }, + "PutConformancePackRequest": { + "base": null, + "refs": { + } + }, + "PutConformancePackResponse": { + "base": null, + "refs": { + } + }, "PutDeliveryChannelRequest": { "base": "

The input for the PutDeliveryChannel action.

", "refs": { @@ -1761,6 +2230,16 @@ "refs": { } }, + "PutOrganizationConformancePackRequest": { + "base": null, + "refs": { + } + }, + "PutOrganizationConformancePackResponse": { + "base": null, + "refs": { + } + }, "PutRemediationConfigurationsRequest": { "base": null, "refs": { @@ -2038,7 +2517,7 @@ } }, "ResourceInUseException": { - "base": "

You see this exception in the following cases:

", + "base": "

You see this exception in the following cases:

", "refs": { } }, @@ -2216,6 +2695,12 @@ "ExecutionControls$SsmControls": "

A SsmControls object.

" } }, + "StackArn": { + "base": null, + "refs": { + "ConformancePackStatusDetail$StackArn": "

Amazon Resource Name (ARN) of the AWS CloudFormation stack.

" + } + }, "StartConfigRulesEvaluationRequest": { "base": "

", "refs": { @@ -2308,6 +2793,10 @@ "DescribeOrganizationConfigRuleStatusesResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeOrganizationConfigRulesRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeOrganizationConfigRulesResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "DescribeOrganizationConformancePackStatusesRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "DescribeOrganizationConformancePackStatusesResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "DescribeOrganizationConformancePacksRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "DescribeOrganizationConformancePacksResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribePendingAggregationRequestsRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribePendingAggregationRequestsResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "DescribeRemediationExceptionsRequest$NextToken": "

The nextToken string returned in a previous request that you use to request the next page of results in a paginated response.

", @@ -2322,11 +2811,17 @@ "GetComplianceDetailsByResourceResponse$NextToken": "

The string that you use in a subsequent request to get the next page of results in a paginated response.

", "GetOrganizationConfigRuleDetailedStatusRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "GetOrganizationConfigRuleDetailedStatusResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "GetOrganizationConformancePackDetailedStatusRequest$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", + "GetOrganizationConformancePackDetailedStatusResponse$NextToken": "

The nextToken string returned on a previous page that you use to get the next page of results in a paginated response.

", "MemberAccountStatus$ErrorCode": "

An error code that is returned when config rule creation or deletion failed in the member account.

", "MemberAccountStatus$ErrorMessage": "

An error message indicating that config rule account creation or deletion has failed due to an error in the member account.

", "OrganizationAggregationSource$RoleArn": "

ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.

", "OrganizationConfigRuleStatus$ErrorCode": "

An error code that is returned when organization config rule creation or deletion has failed.

", "OrganizationConfigRuleStatus$ErrorMessage": "

An error message indicating that organization config rule creation or deletion failed due to an error.

", + "OrganizationConformancePackDetailedStatus$ErrorCode": "

An error code that is returned when conformance pack creation or deletion failed in the member account.

", + "OrganizationConformancePackDetailedStatus$ErrorMessage": "

An error message indicating that conformance pack account creation or deletion has failed due to an error in the member account.

", + "OrganizationConformancePackStatus$ErrorCode": "

An error code that is returned when organization conformance pack creation or deletion has failed in the member account.

", + "OrganizationConformancePackStatus$ErrorMessage": "

An error message indicating that organization conformance pack creation or deletion failed due to an error.

", "PutEvaluationsRequest$ResultToken": "

An encrypted token that associates an evaluation with an AWS Config rule. Identifies the rule and the event that triggered the evaluation.

", "RemediationConfiguration$TargetVersion": "

Version of the target. For example, version of the SSM document.

", "RemediationConfiguration$ResourceType": "

The type of a resource.

", @@ -2372,6 +2867,9 @@ "ComplianceResourceTypes$member": null, "ComplianceSummaryByResourceType$ResourceType": "

The type of AWS resource.

", "ConfigRule$CreatedBy": "

Service principal name of the service that created the rule.

The field is populated only if the service linked rule is created by a service. The field is empty if you create your own rule.

", + "ConformancePackComplianceResourceIds$member": null, + "ConformancePackDetail$CreatedBy": null, + "ConformancePackEvaluationFilters$ResourceType": "

Filters the results by the resource type (for example, \"AWS::EC2::Instance\").

", "DescribeComplianceByResourceRequest$ResourceType": "

The types of AWS resources for which you want compliance information (for example, AWS::EC2::Instance). For this action, you can specify that the resource type is an AWS account by specifying AWS::::Account.

", "Evaluation$ComplianceResourceType": "

The type of AWS resource that was evaluated.

", "Evaluation$Annotation": "

Supplementary information about how the evaluation determined the compliance.

", @@ -2381,12 +2879,15 @@ "GetAggregateDiscoveredResourceCountsResponse$GroupByKey": "

The key passed into the request object. If GroupByKey is not provided, the result will be empty.

", "GetComplianceDetailsByResourceRequest$ResourceType": "

The type of the AWS resource for which you want compliance information.

", "GroupedResourceCount$GroupName": "

The name of the group that can be region, account ID, or resource type. For example, region1, region2 if the region was chosen as GroupByKey.

", - "OrganizationConfigRule$OrganizationConfigRuleArn": "

The Amazon Resource Name (ARN) of organization config rule.

", + "OrganizationConfigRule$OrganizationConfigRuleArn": "

Amazon Resource Name (ARN) of organization config rule.

", + "OrganizationConformancePack$OrganizationConformancePackArn": "

Amazon Resource Name (ARN) of organization conformance pack.

", + "OrganizationConformancePackDetailedStatus$ConformancePackName": "

The name of the conformance pack deployed in the member account.

", "OrganizationCustomRuleMetadata$LambdaFunctionArn": "

The lambda function ARN.

", "OrganizationCustomRuleMetadata$TagValueScope": "

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

", "OrganizationManagedRuleMetadata$RuleIdentifier": "

For organization config managed rules, a predefined identifier from a list. For example, IAM_PASSWORD_POLICY is a managed rule. To reference a managed rule, see Using AWS Managed Config Rules.

", "OrganizationManagedRuleMetadata$TagValueScope": "

The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key).

", "PutOrganizationConfigRuleResponse$OrganizationConfigRuleArn": "

The Amazon Resource Name (ARN) of an organization config rule.

", + "PutOrganizationConformancePackResponse$OrganizationConformancePackArn": "

ARN of the organization conformance pack.

", "RemediationConfiguration$TargetId": "

Target ID is the name of the public document.

", "RemediationException$ResourceType": "

The type of a resource.

", "RemediationExceptionResourceKey$ResourceType": "

The type of a resource.

", @@ -2411,6 +2912,7 @@ "ComplianceByConfigRule$ConfigRuleName": "

The name of the AWS Config rule.

", "ConfigRule$ConfigRuleName": "

The name that you assign to the AWS Config rule. The name is required if you are adding a new rule.

", "ConfigRuleEvaluationStatus$ConfigRuleName": "

The name of the AWS Config rule.

", + "ConformancePackConfigRuleNames$member": null, "DeleteConfigRuleRequest$ConfigRuleName": "

The name of the AWS Config rule that you want to delete.

", "DeleteEvaluationResultsRequest$ConfigRuleName": "

The name of the AWS Config rule for which you want to delete the evaluation results.

", "EvaluationResultQualifier$ConfigRuleName": "

The name of the AWS Config rule that was used in the evaluation.

", @@ -2498,6 +3000,20 @@ "PutConfigurationAggregatorRequest$Tags": "

An array of tag object.

" } }, + "TemplateBody": { + "base": null, + "refs": { + "PutConformancePackRequest$TemplateBody": "

A string that contains the full conformance pack template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.

You can use only a YAML template, and the only supported resource type is config rule.

", + "PutOrganizationConformancePackRequest$TemplateBody": "

A string that contains the full conformance pack template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.

" + } + }, + "TemplateS3Uri": { + "base": null, + "refs": { + "PutConformancePackRequest$TemplateS3Uri": "

The location of the file that contains the template body. The URI must point to the conformance pack template (maximum size: 300,000 bytes) in an Amazon S3 bucket in the same region as the conformance pack.

You must have read access to the Amazon S3 bucket.
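A minimal sketch of PutConformancePack with an S3-hosted template, assuming the regenerated `configservice` client. `ConformancePackName` and `ConformancePackInputParameters` come from the wider conformance pack request shape and, like the bucket, pack name, and parameter values, are illustrative assumptions; other request members (for example, a delivery S3 bucket) are omitted here:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := configservice.New(cfg)

	// Supply either TemplateS3Uri or TemplateBody, not both.
	req := svc.PutConformancePackRequest(&configservice.PutConformancePackInput{
		ConformancePackName: aws.String("example-pack"),                              // assumed member; placeholder name
		TemplateS3Uri:       aws.String("s3://example-bucket/conformance-pack.yaml"), // placeholder location
		ConformancePackInputParameters: []configservice.ConformancePackInputParameter{
			{
				ParameterName:  aws.String("ExampleParameterName"), // the key of the key-value pair
				ParameterValue: aws.String("ExampleValue"),         // the value of the key-value pair
			},
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```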

", + "PutOrganizationConformancePackRequest$TemplateS3Uri": "

The location of the file that contains the template body. The URI must point to the conformance pack template (maximum size: 300,000 bytes).

You must have read access to the Amazon S3 bucket.

" + } + }, "TooManyTagsException": { "base": "

You have reached the limit of the number of tags you can use. You have more than 50 tags.

", "refs": { diff --git a/models/apis/connect/2017-08-08/api-2.json b/models/apis/connect/2017-08-08/api-2.json index 2c626e13e07..801c45db8fb 100644 --- a/models/apis/connect/2017-08-08/api-2.json +++ b/models/apis/connect/2017-08-08/api-2.json @@ -253,6 +253,22 @@ {"shape":"InternalServiceException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, "ListUserHierarchyGroups":{ "name":"ListUserHierarchyGroups", "http":{ @@ -319,6 +335,36 @@ {"shape":"InternalServiceException"} ] }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InternalServiceException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ] + }, "UpdateContactAttributes":{ "name":"UpdateContactAttributes", "http":{ @@ -537,7 +583,8 @@ "shape":"InstanceId", "location":"uri", "locationName":"InstanceId" - } + }, + "Tags":{"shape":"TagMap"} } }, "CreateUserResponse":{ @@ -1210,6 +1257,23 @@ "NextToken":{"shape":"NextToken"} } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{"shape":"TagMap"} + } + }, "ListUserHierarchyGroupsRequest":{ "type":"structure", "required":["InstanceId"], @@ -1718,6 +1782,44 @@ "members":{ } }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{"shape":"TagMap"} + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "Threshold":{ "type":"structure", "members":{ @@ -1745,6 +1847,25 @@ "PERCENT" ] }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ARN", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, "UpdateContactAttributesRequest":{ "type":"structure", "required":[ @@ -1878,7 +1999,8 @@ "DirectoryUserId":{"shape":"DirectoryUserId"}, "SecurityProfileIds":{"shape":"SecurityProfileIds"}, 
"RoutingProfileId":{"shape":"RoutingProfileId"}, - "HierarchyGroupId":{"shape":"HierarchyGroupId"} + "HierarchyGroupId":{"shape":"HierarchyGroupId"}, + "Tags":{"shape":"TagMap"} } }, "UserId":{"type":"string"}, diff --git a/models/apis/connect/2017-08-08/docs-2.json b/models/apis/connect/2017-08-08/docs-2.json index e321b6592e7..c6ace89b68f 100644 --- a/models/apis/connect/2017-08-08/docs-2.json +++ b/models/apis/connect/2017-08-08/docs-2.json @@ -4,7 +4,7 @@ "operations": { "CreateUser": "

Creates a user account for the specified Amazon Connect instance.

", "DeleteUser": "

Deletes a user account from the specified Amazon Connect instance.

", - "DescribeUser": "

Describes the specified user account.

", + "DescribeUser": "

Describes the specified user account. You can find the instance ID in the console (it’s the final part of the ARN). The console does not display the user IDs. Instead, list the users and note the IDs provided in the output.
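A minimal sketch of the ListUsers-then-DescribeUser flow this description suggests, assuming the Amazon Connect client in this SDK's request/`Send` style; the instance ID and user ID are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/connect"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := connect.New(cfg)
	instanceID := "11111111-2222-3333-4444-555555555555" // placeholder instance ID

	// List the users and note the IDs in the returned summaries.
	listReq := svc.ListUsersRequest(&connect.ListUsersInput{
		InstanceId: aws.String(instanceID),
	})
	listResp, err := listReq.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(listResp)

	// Describe a single user once its ID is known.
	descReq := svc.DescribeUserRequest(&connect.DescribeUserInput{
		InstanceId: aws.String(instanceID),
		UserId:     aws.String("user-id-from-ListUsers"), // placeholder user ID
	})
	descResp, err := descReq.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(descResp)
}
```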

", "DescribeUserHierarchyGroup": "

Describes the specified hierarchy group.

", "DescribeUserHierarchyStructure": "

Describes the hierarchy structure of the specified Amazon Connect instance.

", "GetContactAttributes": "

Retrieves the contact attributes for the specified contact.

", @@ -17,10 +17,13 @@ "ListQueues": "

Provides information about the queues for the specified Amazon Connect instance.

", "ListRoutingProfiles": "

Provides summary information about the routing profiles for the specified Amazon Connect instance.

", "ListSecurityProfiles": "

Provides summary information about the security profiles for the specified Amazon Connect instance.

", + "ListTagsForResource": "

Lists the tags for the specified resource.

", "ListUserHierarchyGroups": "

Provides summary information about the hierarchy groups for the specified Amazon Connect instance.

", "ListUsers": "

Provides summary information about the users for the specified Amazon Connect instance.

", "StartOutboundVoiceContact": "

Initiates a contact flow to place an outbound call to a customer.

There is a 60 second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

", "StopContact": "

Ends the specified contact.

", + "TagResource": "

Adds the specified tags to the specified resource.

The supported resource type is users.
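A minimal sketch of tagging a user and reading the tags back, assuming the regenerated `connect` client; the user ARN and tag values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/connect"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := connect.New(cfg)
	userArn := "arn:aws:connect:us-west-2:123456789012:instance/EXAMPLE/agent/EXAMPLE" // placeholder user ARN

	// Tag the user (users are the supported resource type for these operations).
	tagReq := svc.TagResourceRequest(&connect.TagResourceInput{
		ResourceArn: aws.String(userArn),
		Tags:        map[string]string{"Department": "Support"},
	})
	if _, err := tagReq.Send(context.Background()); err != nil {
		log.Fatal(err)
	}

	// Read the tags back.
	listReq := svc.ListTagsForResourceRequest(&connect.ListTagsForResourceInput{
		ResourceArn: aws.String(userArn),
	})
	listResp, err := listReq.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(listResp.Tags)
}
```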

", + "UntagResource": "

Removes the specified tags from the specified resource.

", "UpdateContactAttributes": "

Creates or updates the contact attributes associated with the specified contact.

You can add or update attributes for both ongoing and completed contacts. For example, you can update the customer's name or the reason the customer called while the call is active, or add notes about steps that the agent took during the call that are displayed to the next agent that takes the call. You can also update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or identifying abusive callers.

Contact attributes are available in Amazon Connect for 24 months, and are then deleted.

Important: You cannot use the operation to update attributes for contacts that occurred prior to the release of the API, September 12, 2018. You can update attributes only for contacts that started after the release of the API. If you attempt to update attributes for a contact that occurred prior to the release of the API, a 400 error is returned. This applies also to queued callbacks that were initiated prior to the release of the API but are still active in your instance.
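A minimal sketch of UpdateContactAttributes, assuming the regenerated `connect` client; the IDs and the attribute key-value pairs are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/connect"
)

// flagContactForReview attaches free-form attributes to an active or completed contact.
func flagContactForReview(instanceID, initialContactID string) error {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		return err
	}
	svc := connect.New(cfg)
	req := svc.UpdateContactAttributesRequest(&connect.UpdateContactAttributesInput{
		InstanceId:       aws.String(instanceID),
		InitialContactId: aws.String(initialContactID),
		Attributes: map[string]string{
			"legalReview": "requested", // example attribute key and value
		},
	})
	_, err = req.Send(context.Background())
	return err
}

func main() {
	if err := flagContactForReview("instance-id", "initial-contact-id"); err != nil {
		log.Fatal(err)
	}
}
```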

", "UpdateUserHierarchy": "

Assigns the specified hierarchy group to the specified user.

", "UpdateUserIdentityInfo": "

Updates the identity information for the specified user.

", @@ -38,11 +41,14 @@ "HierarchyGroupSummary$Arn": "

The Amazon Resource Name (ARN) of the hierarchy group.

", "HierarchyLevel$Arn": "

The Amazon Resource Name (ARN) of the hierarchy level.

", "HoursOfOperationSummary$Arn": "

The Amazon Resource Name (ARN) of the hours of operation.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource.

", "PhoneNumberSummary$Arn": "

The Amazon Resource Name (ARN) of the phone number.

", "QueueReference$Arn": "

The Amazon Resource Name (ARN) of the queue.

", "QueueSummary$Arn": "

The Amazon Resource Name (ARN) of the queue.

", "RoutingProfileSummary$Arn": "

The Amazon Resource Name (ARN) of the routing profile.

", "SecurityProfileSummary$Arn": "

The Amazon Resource Name (ARN) of the security profile.

", + "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource.

", + "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource.

", "User$Arn": "

The Amazon Resource Name (ARN) of the user account.

", "UserSummary$Arn": "

The Amazon Resource Name (ARN) of the user account.

" } @@ -613,6 +619,16 @@ "refs": { } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "ListUserHierarchyGroupsRequest": { "base": null, "refs": { @@ -914,6 +930,39 @@ "refs": { } }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$tagKeys": "

The tag keys.

" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateUserRequest$Tags": "

One or more tags.

", + "ListTagsForResourceResponse$tags": "

Information about the tags.

", + "TagResourceRequest$tags": "

One or more tags. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.

", + "User$Tags": "

The tags.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, "Threshold": { "base": "

Contains information about the threshold for service level metrics.

", "refs": { @@ -938,6 +987,11 @@ "HistoricalMetric$Unit": "

The unit for the metric.

" } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, "UpdateContactAttributesRequest": { "base": null, "refs": { diff --git a/models/apis/dataexchange/2017-07-25/api-2.json b/models/apis/dataexchange/2017-07-25/api-2.json new file mode 100644 index 00000000000..9c4e91b4c58 --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/api-2.json @@ -0,0 +1,2263 @@ +{ + "metadata": { + "apiVersion": "2017-07-25", + "endpointPrefix": "dataexchange", + "signingName": "dataexchange", + "serviceFullName": "AWS Data Exchange", + "serviceId": "DataExchange", + "protocol": "rest-json", + "jsonVersion": "1.1", + "uid": "dataexchange-2017-07-25", + "signatureVersion": "v4" + }, + "operations": { + "CancelJob": { + "name": "CancelJob", + "http": { + "method": "DELETE", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 204 + }, + "input": { + "shape": "CancelJobRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ConflictException" + } + ] + }, + "CreateDataSet": { + "name": "CreateDataSet", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets", + "responseCode": 201 + }, + "input": { + "shape": "CreateDataSetRequest" + }, + "output": { + "shape": "CreateDataSetResponse" + }, + "errors": [ + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "ServiceLimitExceededException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "CreateJob": { + "name": "CreateJob", + "http": { + "method": "POST", + "requestUri": "/v1/jobs", + "responseCode": 201 + }, + "input": { + "shape": "CreateJobRequest" + }, + "output": { + "shape": "CreateJobResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "CreateRevision": { + "name": "CreateRevision", + "http": { + "method": "POST", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 201 + }, + "input": { + "shape": "CreateRevisionRequest" + }, + "output": { + "shape": "CreateRevisionResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "DeleteAsset": { + "name": "DeleteAsset", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteAssetRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "DeleteDataSet": { + "name": "DeleteDataSet", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteDataSetRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": 
"ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "DeleteRevision": { + "name": "DeleteRevision", + "http": { + "method": "DELETE", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteRevisionRequest" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "GetAsset": { + "name": "GetAsset", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetAssetRequest" + }, + "output": { + "shape": "GetAssetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetDataSet": { + "name": "GetDataSet", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "GetDataSetRequest" + }, + "output": { + "shape": "GetDataSetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetJob": { + "name": "GetJob", + "http": { + "method": "GET", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 200 + }, + "input": { + "shape": "GetJobRequest" + }, + "output": { + "shape": "GetJobResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "GetRevision": { + "name": "GetRevision", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetRevisionRequest" + }, + "output": { + "shape": "GetRevisionResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListDataSetRevisions": { + "name": "ListDataSetRevisions", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetRevisionsRequest" + }, + "output": { + "shape": "ListDataSetRevisionsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListDataSets": { + "name": "ListDataSets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets", + "responseCode": 200 + }, + "input": { + "shape": "ListDataSetsRequest" + }, + "output": { + "shape": "ListDataSetsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListJobs": { + "name": "ListJobs", + "http": { + "method": "GET", + "requestUri": "/v1/jobs", + "responseCode": 200 + }, + "input": { + "shape": "ListJobsRequest" + }, + "output": { + "shape": 
"ListJobsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListRevisionAssets": { + "name": "ListRevisionAssets", + "http": { + "method": "GET", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets", + "responseCode": 200 + }, + "input": { + "shape": "ListRevisionAssetsRequest" + }, + "output": { + "shape": "ListRevisionAssetsResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + } + ] + }, + "ListTagsForResource": { + "name": "ListTagsForResource", + "http": { + "method": "GET", + "requestUri": "/tags/{resource-arn}", + "responseCode": 200 + }, + "input": { + "shape": "ListTagsForResourceRequest" + }, + "output": { + "shape": "ListTagsForResourceResponse" + }, + "errors": [] + }, + "StartJob": { + "name": "StartJob", + "http": { + "method": "PATCH", + "requestUri": "/v1/jobs/{JobId}", + "responseCode": 202 + }, + "input": { + "shape": "StartJobRequest" + }, + "output": { + "shape": "StartJobResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "TagResource": { + "name": "TagResource", + "http": { + "method": "POST", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "TagResourceRequest" + }, + "errors": [] + }, + "UntagResource": { + "name": "UntagResource", + "http": { + "method": "DELETE", + "requestUri": "/tags/{resource-arn}", + "responseCode": 204 + }, + "input": { + "shape": "UntagResourceRequest" + }, + "errors": [] + }, + "UpdateAsset": { + "name": "UpdateAsset", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateAssetRequest" + }, + "output": { + "shape": "UpdateAssetResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + }, + "UpdateDataSet": { + "name": "UpdateDataSet", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateDataSetRequest" + }, + "output": { + "shape": "UpdateDataSetResponse" + }, + "errors": [ + { + "shape": "ResourceNotFoundException" + }, + { + "shape": "ThrottlingException" + }, + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + } + ] + }, + "UpdateRevision": { + "name": "UpdateRevision", + "http": { + "method": "PATCH", + "requestUri": "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateRevisionRequest" + }, + "output": { + "shape": "UpdateRevisionResponse" + }, + "errors": [ + { + "shape": "ValidationException" + }, + { + "shape": "InternalServerException" + }, + { + "shape": "AccessDeniedException" + }, + { + "shape": "ResourceNotFoundException" + }, + { + "shape": 
"ThrottlingException" + }, + { + "shape": "ConflictException" + } + ] + } + }, + "shapes": { + "AccessDeniedException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 403 + } + }, + "Arn": { + "type": "string" + }, + "AssetDestinationEntry": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "Bucket": { + "shape": "__string" + }, + "Key": { + "shape": "__string" + } + }, + "required": [ + "Bucket", + "AssetId" + ] + }, + "AssetDetails": { + "type": "structure", + "members": { + "S3SnapshotAsset": { + "shape": "S3SnapshotAsset" + } + } + }, + "AssetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "AssetType", + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "AssetDetails", + "UpdatedAt", + "RevisionId", + "Name" + ] + }, + "AssetName": { + "type": "string" + }, + "AssetSourceEntry": { + "type": "structure", + "members": { + "Bucket": { + "shape": "__string" + }, + "Key": { + "shape": "__string" + } + }, + "required": [ + "Bucket", + "Key" + ] + }, + "AssetType": { + "type": "string", + "enum": [ + "S3_SNAPSHOT" + ] + }, + "CancelJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "Code": { + "type": "string", + "enum": [ + "ACCESS_DENIED_EXCEPTION", + "INTERNAL_SERVER_EXCEPTION", + "MALWARE_DETECTED", + "RESOURCE_NOT_FOUND_EXCEPTION", + "SERVICE_QUOTA_EXCEEDED_EXCEPTION", + "VALIDATION_EXCEPTION", + "MALWARE_SCAN_ENCRYPTED_FILE" + ] + }, + "ConflictException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "ResourceType" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 409 + } + }, + "CreateDataSetRequest": { + "type": "structure", + "members": { + "AssetType": { + "shape": "AssetType" + }, + "Description": { + "shape": "Description" + }, + "Name": { + "shape": "Name" + }, + "Tags": { + "shape": "MapOf__string" + } + }, + "required": [ + "AssetType", + "Description", + "Name" + ] + }, + "CreateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "CreateJobRequest": { + "type": "structure", + "members": { + "Details": { + "shape": "RequestDetails" + }, + "Type": { + "shape": "Type" + } + }, + "required": [ + "Type", + "Details" + ] + }, + "CreateJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + 
"Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "CreateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Tags": { + "shape": "MapOf__string" + } + }, + "required": [ + "DataSetId" + ] + }, + "CreateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "DataSetEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "Origin", + "AssetType", + "Description", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt", + "Name" + ] + }, + "DeleteAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "DeleteDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + } + }, + "required": [ + "DataSetId" + ] + }, + "DeleteRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "Description": { + "type": "string" + }, + "Details": { + "type": "structure", + "members": { + "ImportAssetFromSignedUrlJobErrorDetails": { + "shape": "ImportAssetFromSignedUrlJobErrorDetails" + }, + "ImportAssetsFromS3JobErrorDetails": { + "shape": "ListOfAssetSourceEntry" + } + } + }, + "ExportAssetToSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetToSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetId": { + "shape": "Id" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + }, + "SignedUrl": { + "shape": "__string" + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp" + } + }, + "required": [ + "DataSetId", + "AssetId", + "RevisionId" + ] + }, + "ExportAssetsToS3RequestDetails": { + "type": "structure", + "members": { + 
"AssetDestinations": { + "shape": "ListOfAssetDestinationEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + }, + "ExportAssetsToS3ResponseDetails": { + "type": "structure", + "members": { + "AssetDestinations": { + "shape": "ListOfAssetDestinationEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "AssetDestinations", + "DataSetId", + "RevisionId" + ] + }, + "GetAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + "shape": "__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId" + ] + }, + "GetAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + } + }, + "required": [ + "DataSetId" + ] + }, + "GetDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "GetJobResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "GetRevisionRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "GetRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "Tags": { + "shape": "MapOf__string" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "Id": { + "type": "string" + }, + 
"ImportAssetFromSignedUrlJobErrorDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + } + }, + "required": [ + "AssetName" + ] + }, + "ImportAssetFromSignedUrlRequestDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + }, + "DataSetId": { + "shape": "Id" + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "Md5Hash", + "RevisionId", + "AssetName" + ] + }, + "ImportAssetFromSignedUrlResponseDetails": { + "type": "structure", + "members": { + "AssetName": { + "shape": "AssetName" + }, + "DataSetId": { + "shape": "Id" + }, + "Md5Hash": { + "shape": "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" + }, + "RevisionId": { + "shape": "Id" + }, + "SignedUrl": { + "shape": "__string" + }, + "SignedUrlExpiresAt": { + "shape": "Timestamp" + } + }, + "required": [ + "DataSetId", + "AssetName", + "RevisionId" + ] + }, + "ImportAssetsFromS3RequestDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "ImportAssetsFromS3ResponseDetails": { + "type": "structure", + "members": { + "AssetSources": { + "shape": "ListOfAssetSourceEntry" + }, + "DataSetId": { + "shape": "Id" + }, + "RevisionId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId", + "AssetSources", + "RevisionId" + ] + }, + "InternalServerException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 500 + } + }, + "JobEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Details": { + "shape": "ResponseDetails" + }, + "Errors": { + "shape": "ListOfJobError" + }, + "Id": { + "shape": "Id" + }, + "State": { + "shape": "State" + }, + "Type": { + "shape": "Type" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "Type", + "Details", + "State", + "CreatedAt", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "JobError": { + "type": "structure", + "members": { + "Code": { + "shape": "Code" + }, + "Details": { + "shape": "Details" + }, + "LimitName": { + "shape": "JobErrorLimitName" + }, + "LimitValue": { + "shape": "__double" + }, + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "JobErrorResourceTypes" + } + }, + "required": [ + "Message", + "Code" + ] + }, + "JobErrorLimitName": { + "type": "string", + "enum": [ + "Assets per revision", + "Asset size in GB" + ] + }, + "JobErrorResourceTypes": { + "type": "string", + "enum": [ + "REVISION", + "ASSET" + ] + }, + "LimitName": { + "type": "string", + "enum": [ + "Products per account", + "Data sets per account", + "Data sets per product", + "Revisions per data set", + "Assets per revision", + "Assets per import job from Amazon S3", + "Asset per export job from Amazon S3", + "Asset size in GB", + "Concurrent in progress jobs to import assets from Amazon S3", + "Concurrent in progress jobs to import assets from a signed URL", + "Concurrent in progress jobs to export assets to Amazon S3", + "Concurrent in progress jobs to export assets to a signed URL" + ] + }, + "ListDataSetRevisionsRequest": { + "type": "structure", + "members": { + 
"DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + } + }, + "required": [ + "DataSetId" + ] + }, + "ListDataSetRevisionsResponse": { + "type": "structure", + "members": { + "NextToken": { + "shape": "NextToken" + }, + "Revisions": { + "shape": "ListOfRevisionEntry" + } + } + }, + "ListDataSetsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "Origin": { + "shape": "__string", + "location": "querystring", + "locationName": "origin" + } + } + }, + "ListDataSetsResponse": { + "type": "structure", + "members": { + "DataSets": { + "shape": "ListOfDataSetEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListJobsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "querystring", + "locationName": "dataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "RevisionId": { + "shape": "__string", + "location": "querystring", + "locationName": "revisionId" + } + } + }, + "ListJobsResponse": { + "type": "structure", + "members": { + "Jobs": { + "shape": "ListOfJobEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListOfAssetDestinationEntry": { + "type": "list", + "member": { + "shape": "AssetDestinationEntry" + } + }, + "ListOfAssetSourceEntry": { + "type": "list", + "member": { + "shape": "AssetSourceEntry" + } + }, + "ListRevisionAssetsRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "ListRevisionAssetsResponse": { + "type": "structure", + "members": { + "Assets": { + "shape": "ListOfAssetEntry" + }, + "NextToken": { + "shape": "NextToken" + } + } + }, + "ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + } + }, + "required": [ + "ResourceArn" + ] + }, + "ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "shape": "MapOf__string", + "locationName": "tags" + } + } + }, + "MaxResults": { + "type": "integer", + "min": 1, + "max": 25 + }, + "Name": { + "type": "string" + }, + "NextToken": { + "type": "string" + }, + "Origin": { + "type": "string", + "enum": [ + "OWNED", + "ENTITLED" + ] + }, + "OriginDetails": { + "type": "structure", + "members": { + "ProductId": { + "shape": "__string" + } + }, + "required": [ + "ProductId" + ] + }, + "RequestDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlRequestDetails" + }, + 
"ExportAssetsToS3": { + "shape": "ExportAssetsToS3RequestDetails" + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlRequestDetails" + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3RequestDetails" + } + } + }, + "ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + }, + "ResourceId": { + "shape": "__string" + }, + "ResourceType": { + "shape": "ResourceType" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 404 + } + }, + "ResourceType": { + "type": "string", + "enum": [ + "DATA_SET", + "REVISION", + "ASSET", + "JOB" + ] + }, + "ResponseDetails": { + "type": "structure", + "members": { + "ExportAssetToSignedUrl": { + "shape": "ExportAssetToSignedUrlResponseDetails" + }, + "ExportAssetsToS3": { + "shape": "ExportAssetsToS3ResponseDetails" + }, + "ImportAssetFromSignedUrl": { + "shape": "ImportAssetFromSignedUrlResponseDetails" + }, + "ImportAssetsFromS3": { + "shape": "ImportAssetsFromS3ResponseDetails" + } + } + }, + "RevisionEntry": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + }, + "required": [ + "CreatedAt", + "DataSetId", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "S3SnapshotAsset": { + "type": "structure", + "members": { + "Size": { + "shape": "__doubleMin0" + } + }, + "required": [ + "Size" + ] + }, + "ServiceLimitExceededException": { + "type": "structure", + "members": { + "LimitName": { + "shape": "LimitName" + }, + "LimitValue": { + "shape": "__double" + }, + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 402 + } + }, + "StartJobRequest": { + "type": "structure", + "members": { + "JobId": { + "shape": "__string", + "location": "uri", + "locationName": "JobId" + } + }, + "required": [ + "JobId" + ] + }, + "StartJobResponse": { + "type": "structure", + "members": {} + }, + "State": { + "type": "string", + "enum": [ + "WAITING", + "IN_PROGRESS", + "ERROR", + "COMPLETED", + "CANCELLED", + "TIMED_OUT" + ] + }, + "TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "Tags": { + "shape": "MapOf__string", + "locationName": "tags" + } + }, + "required": [ + "ResourceArn", + "Tags" + ] + }, + "ThrottlingException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 429 + } + }, + "Timestamp": { + "type": "timestamp", + "timestampFormat": "iso8601" + }, + "Type": { + "type": "string", + "enum": [ + "IMPORT_ASSETS_FROM_S3", + "IMPORT_ASSET_FROM_SIGNED_URL", + "EXPORT_ASSETS_TO_S3", + "EXPORT_ASSET_TO_SIGNED_URL" + ] + }, + "UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "shape": "__string", + "location": "uri", + "locationName": "resource-arn" + }, + "TagKeys": { + "shape": "ListOf__string", + "location": "querystring", + "locationName": "tagKeys" + } + }, + "required": [ + "TagKeys", + "ResourceArn" + ] + }, + "UpdateAssetRequest": { + "type": "structure", + "members": { + "AssetId": { + 
"shape": "__string", + "location": "uri", + "locationName": "AssetId" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "AssetId", + "DataSetId", + "Name" + ] + }, + "UpdateAssetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetDetails": { + "shape": "AssetDetails" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "AssetName" + }, + "RevisionId": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "UpdateDataSetRequest": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Description": { + "shape": "Description" + }, + "Name": { + "shape": "Name" + } + }, + "required": [ + "DataSetId" + ] + }, + "UpdateDataSetResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "AssetType": { + "shape": "AssetType" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "Description": { + "shape": "Description" + }, + "Id": { + "shape": "Id" + }, + "Name": { + "shape": "Name" + }, + "Origin": { + "shape": "Origin" + }, + "OriginDetails": { + "shape": "OriginDetails" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "UpdateRevisionRequest": { + "type": "structure", + "members": { + "Comment": { + "shape": "__stringMin0Max16384" + }, + "DataSetId": { + "shape": "__string", + "location": "uri", + "locationName": "DataSetId" + }, + "Finalized": { + "shape": "__boolean" + }, + "RevisionId": { + "shape": "__string", + "location": "uri", + "locationName": "RevisionId" + } + }, + "required": [ + "RevisionId", + "DataSetId" + ] + }, + "UpdateRevisionResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "Arn" + }, + "Comment": { + "shape": "__stringMin0Max16384" + }, + "CreatedAt": { + "shape": "Timestamp" + }, + "DataSetId": { + "shape": "Id" + }, + "Finalized": { + "shape": "__boolean" + }, + "Id": { + "shape": "Id" + }, + "SourceId": { + "shape": "Id" + }, + "UpdatedAt": { + "shape": "Timestamp" + } + } + }, + "ValidationException": { + "type": "structure", + "members": { + "Message": { + "shape": "__string" + } + }, + "required": [ + "Message" + ], + "exception": true, + "error": { + "httpStatusCode": 400 + } + }, + "__boolean": { + "type": "boolean" + }, + "__double": { + "type": "double" + }, + "__doubleMin0": { + "type": "double" + }, + "ListOfAssetEntry": { + "type": "list", + "member": { + "shape": "AssetEntry" + } + }, + "ListOfDataSetEntry": { + "type": "list", + "member": { + "shape": "DataSetEntry" + } + }, + "ListOfJobEntry": { + "type": "list", + "member": { + "shape": "JobEntry" + } + }, + "ListOfJobError": { + "type": "list", + "member": { + "shape": "JobError" + } + }, + "ListOfRevisionEntry": { + "type": "list", + "member": { + "shape": "RevisionEntry" + } + }, + "ListOf__string": { + "type": "list", + "member": { + "shape": "__string" + } + }, + "MapOf__string": { + "type": "map", + "key": { + "shape": "__string" + }, + "value": { + "shape": "__string" + } + }, + "__string": { + "type": "string" + }, + "__stringMin0Max16384": { + "type": "string", + "min": 
0, + "max": 16384 + }, + "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093": { + "type": "string", + "min": 24, + "max": 24, + "pattern": "/^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$/" + } + }, + "authorizers": { + "create_job_authorizer": { + "name": "create_job_authorizer", + "type": "provided", + "placement": { + "location": "header", + "name": "Authorization" + } + }, + "start_cancel_get_job_authorizer": { + "name": "start_cancel_get_job_authorizer", + "type": "provided", + "placement": { + "location": "header", + "name": "Authorization" + } + } + } +} \ No newline at end of file diff --git a/models/apis/dataexchange/2017-07-25/docs-2.json b/models/apis/dataexchange/2017-07-25/docs-2.json new file mode 100644 index 00000000000..2b6b9fc23f6 --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/docs-2.json @@ -0,0 +1,581 @@ +{ + "version" : "2.0", + "service" : "

This is the API reference for AWS Data Exchange.

", + "operations" : { + "CancelJob" : "

This operation cancels a job. Jobs can be cancelled only when they are in the WAITING state.

", + "CreateDataSet" : "

This operation creates a data set.

", + "CreateJob" : "

This operation creates a job.

", + "CreateRevision" : "

This operation creates a revision for a data set.

", + "DeleteAsset" : "

This operation deletes an asset.

", + "DeleteDataSet" : "

This operation deletes a data set.

", + "DeleteRevision" : "

This operation deletes a revision.

", + "GetAsset" : "

This operation returns information about an asset.

", + "GetDataSet" : "

This operation returns information about a data set.

", + "GetJob" : "

This operation returns information about a job.

", + "GetRevision" : "

This operation returns information about a revision.

", + "ListDataSetRevisions" : "

This operation lists a data set's revisions sorted by CreatedAt in descending order.

", + "ListDataSets" : "

This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. When listing by origin ENTITLED, there is no order and the maxResults parameter is ignored.
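As a rough usage sketch (not part of the model): the data set listing described above can be driven from Go through the generated dataexchange client, assuming the SDK's v0.x request/Send pattern; the input and output field names below mirror the ListDataSets shapes in this model, and Origin is one of OWNED or ENTITLED.

```go
// Sketch: page through owned data sets with the new dataexchange client.
// Assumes the aws-sdk-go-v2 v0.x request/Send pattern; field names mirror
// the ListDataSets shapes in this model.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	client := dataexchange.New(cfg)

	var nextToken *string
	for {
		req := client.ListDataSetsRequest(&dataexchange.ListDataSetsInput{
			Origin:     aws.String("OWNED"), // owned results come back sorted by CreatedAt, descending
			MaxResults: aws.Int64(25),       // MaxResults is capped at 25 by the model
			NextToken:  nextToken,
		})
		resp, err := req.Send(context.Background())
		if err != nil {
			panic(err)
		}
		for _, ds := range resp.DataSets {
			fmt.Println(*ds.Id, *ds.Name) // Id and Name are required members of DataSetEntry
		}
		if resp.NextToken == nil {
			break // no more pages
		}
		nextToken = resp.NextToken
	}
}
```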

", + "ListJobs" : "

This operation lists your jobs sorted by CreatedAt in descending order.

", + "ListRevisionAssets" : "

This operation lists a revision's assets sorted alphabetically in descending order.

", + "ListTagsForResource" : "

This operation lists the tags on the resource.

", + "StartJob" : "

This operation starts a job.

", + "TagResource" : "

This operation tags a resource.

", + "UntagResource" : "

This operation removes one or more tags from a resource.

", + "UpdateAsset" : "

This operation updates an asset.

", + "UpdateDataSet" : "

This operation updates a data set.

", + "UpdateRevision" : "

This operation updates a revision.

" + }, + "shapes" : { + "AccessDeniedException" : { + "base" : "

Access to the resource is denied.

", + "refs" : { } + }, + "Arn" : { + "base" : "

An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

", + "refs" : { + "Asset$Arn" : "

The ARN for the asset.

", + "AssetEntry$Arn" : "

The ARN for the asset.

", + "DataSet$Arn" : "

The ARN for the data set.

", + "DataSetEntry$Arn" : "

The ARN for the data set.

", + "Job$Arn" : "

The ARN for the job.

", + "JobEntry$Arn" : "

The ARN for the job.

", + "Revision$Arn" : "

The ARN for the revision.

", + "RevisionEntry$Arn" : "

The ARN for the revision.

", + "TaggedDataSet$Arn" : "

The ARN for the data set.

", + "TaggedRevision$Arn" : "

The ARN for the revision.

" + } + }, + "Asset" : { + "base" : "

An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.

", + "refs" : { } + }, + "AssetDestinationEntry" : { + "base" : "

The destination for the asset.

", + "refs" : { + "ListOfAssetDestinationEntry$member" : null + } + }, + "AssetDetails" : { + "base" : null, + "refs" : { + "Asset$AssetDetails" : "

Information about the asset, including its size.

", + "AssetEntry$AssetDetails" : "

Information about the asset, including its size.

" + } + }, + "AssetEntry" : { + "base" : "

An asset in AWS Data Exchange is a piece of data that can be stored as an S3 object. The asset can be a structured data file, an image file, or some other data file. When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.

", + "refs" : { + "ListOfAssetEntry$member" : null + } + }, + "AssetName" : { + "base" : "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

", + "refs" : { + "Asset$Name" : "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

", + "AssetEntry$Name" : "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

", + "ImportAssetFromSignedUrlJobErrorDetails$AssetName" : null, + "ImportAssetFromSignedUrlRequestDetails$AssetName" : "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name.

", + "ImportAssetFromSignedUrlResponseDetails$AssetName" : "

The name for the asset associated with this import response.

", + "UpdateAssetRequest$Name" : "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

" + } + }, + "AssetSourceEntry" : { + "base" : "

The source of the assets.

", + "refs" : { + "ListOfAssetSourceEntry$member" : null + } + }, + "AssetType" : { + "base" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "refs" : { + "Asset$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "AssetEntry$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "CreateDataSetRequest$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "DataSet$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "DataSetEntry$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

", + "TaggedDataSet$AssetType" : "

The type of file your data is stored in. Currently, the supported asset type is S3_SNAPSHOT.

" + } + }, + "Code" : { + "base" : null, + "refs" : { + "JobError$Code" : "The code for the job error." + } + }, + "ConflictException" : { + "base" : "

The request couldn't be completed because it conflicted with the current state of the resource.

", + "refs" : { } + }, + "CreateDataSetRequest" : { + "base" : "

A request to create a data set that contains one or more revisions.

", + "refs" : { } + }, + "CreateJobRequest" : { + "base" : "

The CreateJob request. AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created. Created jobs must be started with the StartJob operation.
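A minimal sketch of the create-then-start flow may help here; it continues with the client and imports from the ListDataSets sketch above, and the enum constant, struct field names, identifiers, and bucket/key are assumptions or placeholders rather than verified values from the generated package.

```go
// Sketch: create an IMPORT_ASSETS_FROM_S3 job, then start it with StartJob.
// The Type constant and struct field names are assumed from the model above;
// all IDs and the bucket/key are placeholders.
createReq := client.CreateJobRequest(&dataexchange.CreateJobInput{
	Type: dataexchange.TypeImportAssetsFromS3,
	Details: &dataexchange.RequestDetails{
		ImportAssetsFromS3: &dataexchange.ImportAssetsFromS3RequestDetails{
			DataSetId:  aws.String("example-data-set-id"),
			RevisionId: aws.String("example-revision-id"),
			AssetSources: []dataexchange.AssetSourceEntry{
				{Bucket: aws.String("example-bucket"), Key: aws.String("example/object.csv")},
			},
		},
	},
})
createResp, err := createReq.Send(context.Background())
if err != nil {
	panic(err)
}

// Jobs are created in the WAITING state and only run once started.
startReq := client.StartJobRequest(&dataexchange.StartJobInput{JobId: createResp.Id})
if _, err := startReq.Send(context.Background()); err != nil {
	panic(err)
}
```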

", + "refs" : { } + }, + "CreateRevisionRequest" : { + "base" : "

Creates a revision for a data set. When they're created, revisions are not published to products, and therefore are not available to subscribers. To publish a revision to a data set in a product, the revision must first be finalized.

", + "refs" : { } + }, + "DataSet" : { + "base" : "

A data set is an AWS resource with one or more revisions.

", + "refs" : { } + }, + "DataSetEntry" : { + "base" : "

A data set is an AWS resource with one or more revisions.

", + "refs" : { + "ListOfDataSetEntry$member" : null + } + }, + "Description" : { + "base" : "

A description of a resource.

", + "refs" : { + "CreateDataSetRequest$Description" : "

A description for the data set. This value can be up to 16,384 characters long.

", + "DataSet$Description" : "

The description for the data set.

", + "DataSetEntry$Description" : "

The description for the data set.

", + "TaggedDataSet$Description" : "

The description for the data set.

", + "UpdateDataSetRequest$Description" : "

The description for the data set.

" + } + }, + "Details" : { + "base" : null, + "refs" : { + "JobError$Details" : null + } + }, + "ExportAssetToSignedUrlRequestDetails" : { + "base" : "

Details of the operation to be performed by the job.

", + "refs" : { + "RequestDetails$ExportAssetToSignedUrl" : "

Details about the export to signed URL request.

" + } + }, + "ExportAssetToSignedUrlResponseDetails" : { + "base" : "

The details of the export to signed URL response.

", + "refs" : { + "ResponseDetails$ExportAssetToSignedUrl" : "

Details for the export to signed URL response.

" + } + }, + "ExportAssetsToS3RequestDetails" : { + "base" : "

Details of the operation to be performed by the job.

", + "refs" : { + "RequestDetails$ExportAssetsToS3" : "

Details about the export to Amazon S3 request.

" + } + }, + "ExportAssetsToS3ResponseDetails" : { + "base" : "

Details about the export to Amazon S3 response.

", + "refs" : { + "ResponseDetails$ExportAssetsToS3" : "

Details for the export to Amazon S3 response.

" + } + }, + "Id" : { + "base" : "

A unique identifier.

", + "refs" : { + "Asset$DataSetId" : "

The unique identifier for the data set associated with this asset.

", + "Asset$Id" : "

The unique identifier for the asset.

", + "Asset$RevisionId" : "

The unique identifier for the revision associated with this asset.

", + "Asset$SourceId" : "

The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.

", + "AssetDestinationEntry$AssetId" : "

The unique identifier for the asset.

", + "AssetEntry$DataSetId" : "

The unique identifier for the data set associated with this asset.

", + "AssetEntry$Id" : "

The unique identifier for the asset.

", + "AssetEntry$RevisionId" : "

The unique identifier for the revision associated with this asset.

", + "AssetEntry$SourceId" : "

The asset ID of the owned asset corresponding to the entitled asset being viewed. This parameter is returned when an asset owner is viewing the entitled copy of its owned asset.

", + "DataSet$Id" : "

The unique identifier for the data set.

", + "DataSet$SourceId" : "

The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

", + "DataSetEntry$Id" : "

The unique identifier for the data set.

", + "DataSetEntry$SourceId" : "

The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

", + "ExportAssetToSignedUrlRequestDetails$AssetId" : "

The unique identifier for the asset that is exported to a signed URL.

", + "ExportAssetToSignedUrlRequestDetails$DataSetId" : "

The unique identifier for the data set associated with this export job.

", + "ExportAssetToSignedUrlRequestDetails$RevisionId" : "

The unique identifier for the revision associated with this export request.

", + "ExportAssetToSignedUrlResponseDetails$AssetId" : "

The unique identifier for the asset associated with this export job.

", + "ExportAssetToSignedUrlResponseDetails$DataSetId" : "

The unique identifier for the data set associated with this export job.

", + "ExportAssetToSignedUrlResponseDetails$RevisionId" : "

The unique identifier for the revision associated with this export response.

", + "ExportAssetsToS3RequestDetails$DataSetId" : "

The unique identifier for the data set associated with this export job.

", + "ExportAssetsToS3RequestDetails$RevisionId" : "

The unique identifier for the revision associated with this export request.

", + "ExportAssetsToS3ResponseDetails$DataSetId" : "

The unique identifier for the data set associated with this export job.

", + "ExportAssetsToS3ResponseDetails$RevisionId" : "

The unique identifier for the revision associated with this export response.

", + "ImportAssetFromSignedUrlRequestDetails$DataSetId" : "

The unique identifier for the data set associated with this import job.

", + "ImportAssetFromSignedUrlRequestDetails$RevisionId" : "

The unique identifier for the revision associated with this import request.

", + "ImportAssetFromSignedUrlResponseDetails$DataSetId" : "

The unique identifier for the data set associated with this import job.

", + "ImportAssetFromSignedUrlResponseDetails$RevisionId" : "

The unique identifier for the revision associated with this import response.

", + "ImportAssetsFromS3RequestDetails$DataSetId" : "

The unique identifier for the data set associated with this import job.

", + "ImportAssetsFromS3RequestDetails$RevisionId" : "

The unique identifier for the revision associated with this import request.

", + "ImportAssetsFromS3ResponseDetails$DataSetId" : "

The unique identifier for the data set associated with this import job.

", + "ImportAssetsFromS3ResponseDetails$RevisionId" : "

The unique identifier for the revision associated with this import response.

", + "Job$Id" : "

The unique identifier for the job.

", + "JobEntry$Id" : "

The unique identifier for the job.

", + "Revision$DataSetId" : "

The unique identifier for the data set associated with this revision.

", + "Revision$Id" : "

The unique identifier for the revision.

", + "Revision$SourceId" : "

The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

", + "RevisionEntry$DataSetId" : "

The unique identifier for the data set associated with this revision.

", + "RevisionEntry$Id" : "

The unique identifier for the revision.

", + "RevisionEntry$SourceId" : "

The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

", + "TaggedDataSet$Id" : "

The unique identifier for the data set.

", + "TaggedDataSet$SourceId" : "

The data set ID of the owned data set corresponding to the entitled data set being viewed. This parameter is returned when a data set owner is viewing the entitled copy of its owned data set.

", + "TaggedRevision$DataSetId" : "

The unique identifier for the data set associated with this revision.

", + "TaggedRevision$Id" : "

The unique identifier for the revision.

", + "TaggedRevision$SourceId" : "

The revision ID of the owned revision corresponding to the entitled revision being viewed. This parameter is returned when a revision owner is viewing the entitled copy of its owned revision.

" + } + }, + "ImportAssetFromSignedUrlJobErrorDetails" : { + "base" : null, + "refs" : { + "Details$ImportAssetFromSignedUrlJobErrorDetails" : null + } + }, + "ImportAssetFromSignedUrlRequestDetails" : { + "base" : "

Details of the operation to be performed by the job.

", + "refs" : { + "RequestDetails$ImportAssetFromSignedUrl" : "

Details about the import from signed URL request.

" + } + }, + "ImportAssetFromSignedUrlResponseDetails" : { + "base" : "

The details in the response for an import request, including the signed URL and other information.

", + "refs" : { + "ResponseDetails$ImportAssetFromSignedUrl" : "

Details for the import from signed URL response.

" + } + }, + "ImportAssetsFromS3RequestDetails" : { + "base" : "

Details of the operation to be performed by the job.

", + "refs" : { + "RequestDetails$ImportAssetsFromS3" : "

Details about the import from Amazon S3 request.

" + } + }, + "ImportAssetsFromS3ResponseDetails" : { + "base" : "

Details from an import from Amazon S3 response.

", + "refs" : { + "ResponseDetails$ImportAssetsFromS3" : "

Details for the import from Amazon S3 response.

" + } + }, + "InternalServerException" : { + "base" : "An exception occurred with the service.", + "refs" : { } + }, + "Job" : { + "base" : "

AWS Data Exchange jobs are asynchronous import or export operations used to create or copy assets. Jobs are deleted 90 days after they are created.

", + "refs" : { } + }, + "JobEntry" : { + "base" : "AWS Data Exchange Jobs are asynchronous import or export operations used to create or copy assets. A data set owner can both import and export as they see fit. Someone with an entitlement to a data set can only export. Jobs are deleted 90 days after they are created.", + "refs" : { + "ListOfJobEntry$member" : null + } + }, + "JobError" : { + "base" : "An error that occurred with the job request.", + "refs" : { + "ListOfJobError$member" : null + } + }, + "JobErrorLimitName" : { + "base" : "The name of the limit that was reached.", + "refs" : { + "JobError$LimitName" : "

The name of the limit that was reached.

" + } + }, + "JobErrorResourceTypes" : { + "base" : "The types of resource which the job error can apply to.", + "refs" : { + "JobError$ResourceType" : "The type of resource related to the error." + } + }, + "LimitName" : { + "base" : null, + "refs" : { + "ServiceQuotaExceededException$LimitName" : "

The name of the quota that was exceeded.

" + } + }, + "ListOfAssetDestinationEntry" : { + "base" : "

The destination where the assets will be exported.

", + "refs" : { + "ExportAssetsToS3RequestDetails$AssetDestinations" : "

The destination for the asset.

", + "ExportAssetsToS3ResponseDetails$AssetDestinations" : "

The destination in Amazon S3 where the asset is exported.

" + } + }, + "ListOfAssetSourceEntry" : { + "base" : "

The list of sources for the assets.

", + "refs" : { + "Details$ImportAssetsFromS3JobErrorDetails" : null, + "ImportAssetsFromS3RequestDetails$AssetSources" : "

A list of Amazon S3 bucket and object key pairs.

", + "ImportAssetsFromS3ResponseDetails$AssetSources" : "

A list of Amazon S3 bucket and object key pairs.

" + } + }, + "ListOfAssets" : { + "base" : "

The asset objects listed by the request.

", + "refs" : { } + }, + "ListOfDataSets" : { + "base" : "

The data set objects listed by the request.

", + "refs" : { } + }, + "ListOfJobs" : { + "base" : "

The job objects listed by the request.

", + "refs" : { } + }, + "ListOfRevisions" : { + "base" : "

The revision objects listed by the request.

", + "refs" : { } + }, + "Name" : { + "base" : "The name of the model.", + "refs" : { + "CreateDataSetRequest$Name" : "

The name of the data set.

", + "DataSet$Name" : "

The name of the data set.

", + "DataSetEntry$Name" : "

The name of the data set.

", + "TaggedDataSet$Name" : "

The name of the data set.

", + "UpdateDataSetRequest$Name" : "

The name of the data set.

" + } + }, + "NextToken" : { + "base" : "

The token value retrieved from a previous call to access the next page of results.

", + "refs" : { + "ListOfAssets$NextToken" : "

The token value retrieved from a previous call to access the next page of results.

", + "ListOfDataSets$NextToken" : "

The token value retrieved from a previous call to access the next page of results.

", + "ListOfJobs$NextToken" : "

The token value retrieved from a previous call to access the next page of results.

", + "ListOfRevisions$NextToken" : "

The token value retrieved from a previous call to access the next page of results.

" + } + }, + "Origin" : { + "base" : "

A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers). When an owned data set is published in a product, AWS Data Exchange creates a copy of the data set. Subscribers can access that copy of the data set as an entitled data set.

", + "refs" : { + "DataSet$Origin" : "

A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

", + "DataSetEntry$Origin" : "

A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

", + "TaggedDataSet$Origin" : "

A property that defines the data set as OWNED by the account (for providers) or ENTITLED to the account (for subscribers).

" + } + }, + "OriginDetails" : { + "base" : null, + "refs" : { + "DataSet$OriginDetails" : "

If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

", + "DataSetEntry$OriginDetails" : "

If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

", + "TaggedDataSet$OriginDetails" : "

If the origin of this data set is ENTITLED, includes the details for the product on AWS Marketplace.

" + } + }, + "RequestDetails" : { + "base" : "

The details for the request.

", + "refs" : { + "CreateJobRequest$Details" : "

The details for the CreateJob request.

" + } + }, + "ResourceNotFoundException" : { + "base" : "

The resource couldn't be found.

", + "refs" : { } + }, + "ResourceType" : { + "base" : null, + "refs" : { + "ConflictException$ResourceType" : "

The type of the resource with the conflict.

", + "ResourceNotFoundException$ResourceType" : "

The type of resource that couldn't be found.

" + } + }, + "ResponseDetails" : { + "base" : "

Details for the response.

", + "refs" : { + "Job$Details" : "

Details about the job.

", + "JobEntry$Details" : "

Details of the operation to be performed by the job, such as export destination details or import source details.

" + } + }, + "Revision" : { + "base" : "

A revision is a container for one or more assets.

", + "refs" : { } + }, + "RevisionEntry" : { + "base" : "

A revision is a container for one or more assets.

", + "refs" : { + "ListOfRevisionEntry$member" : null + } + }, + "S3SnapshotAsset" : { + "base" : "

The S3 object that is the asset.

", + "refs" : { + "AssetDetails$S3SnapshotAsset" : null + } + }, + "ServiceLimitExceededException" : { + "base" : "

The request has exceeded the quotas imposed by the service.

", + "refs" : { } + }, + "ServiceQuotaExceededException" : { + "base" : "

The request has exceeded the quotas imposed by the service.

", + "refs" : { } + }, + "State" : { + "base" : null, + "refs" : { + "Job$State" : "

The state of the job.

", + "JobEntry$State" : "

The state of the job.

" + } + }, + "TaggedDataSet" : { + "base" : "

A data set is an AWS resource with one or more revisions.

", + "refs" : { } + }, + "TaggedRevision" : { + "base" : "

A revision tag is an optional label that you can assign to a revision when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.

", + "refs" : { } + }, + "TagsModel" : { + "base" : "You can assign metadata to your AWS Data Exchange resources in the form of tags. Each tag is a label that consists of a customer-defined key and an optional value that can make it easier to manage, search for, and filter resources.", + "refs" : { } + }, + "ThrottlingException" : { + "base" : "

The limit on the number of requests per second was exceeded.

", + "refs" : { } + }, + "Timestamp" : { + "base" : "

Dates and times in AWS Data Exchange are recorded in ISO 8601 format.

", + "refs" : { + "Asset$CreatedAt" : "

The date and time that the asset was created, in ISO 8601 format.

", + "Asset$UpdatedAt" : "

The date and time that the asset was last updated, in ISO 8601 format.

", + "AssetEntry$CreatedAt" : "

The date and time that the asset was created, in ISO 8601 format.

", + "AssetEntry$UpdatedAt" : "

The date and time that the asset was last updated, in ISO 8601 format.

", + "DataSet$CreatedAt" : "

The date and time that the data set was created, in ISO 8601 format.

", + "DataSet$UpdatedAt" : "

The date and time that the data set was last updated, in ISO 8601 format.

", + "DataSetEntry$CreatedAt" : "

The date and time that the data set was created, in ISO 8601 format.

", + "DataSetEntry$UpdatedAt" : "

The date and time that the data set was last updated, in ISO 8601 format.

", + "ExportAssetToSignedUrlResponseDetails$SignedUrlExpiresAt" : "

The date and time that the signed URL expires, in ISO 8601 format.

", + "ImportAssetFromSignedUrlResponseDetails$SignedUrlExpiresAt" : "

The time and date at which the signed URL expires, in ISO 8601 format.

", + "Job$CreatedAt" : "

The date and time that the job was created, in ISO 8601 format.

", + "Job$UpdatedAt" : "

The date and time that the job was last updated, in ISO 8601 format.

", + "JobEntry$CreatedAt" : "

The date and time that the job was created, in ISO 8601 format.

", + "JobEntry$UpdatedAt" : "

The date and time that the job was last updated, in ISO 8601 format.

", + "Revision$CreatedAt" : "

The date and time that the revision was created, in ISO 8601 format.

", + "Revision$UpdatedAt" : "

The date and time that the revision was last updated, in ISO 8601 format.

", + "RevisionEntry$CreatedAt" : "

The date and time that the revision was created, in ISO 8601 format.

", + "RevisionEntry$UpdatedAt" : "

The date and time that the revision was last updated, in ISO 8601 format.

", + "TaggedDataSet$CreatedAt" : "

The date and time that the data set was created, in ISO 8601 format.

", + "TaggedDataSet$UpdatedAt" : "

The date and time that the data set was last updated, in ISO 8601 format.

", + "TaggedRevision$CreatedAt" : "

The date and time that the revision was created, in ISO 8601 format.

", + "TaggedRevision$UpdatedAt" : "

The date and time that the revision was last updated, in ISO 8601 format.

" + } + }, + "Type" : { + "base" : null, + "refs" : { + "CreateJobRequest$Type" : "

The type of job to be created.

", + "Job$Type" : "

The job type.

", + "JobEntry$Type" : "

The job type.

" + } + }, + "UpdateAssetRequest" : { + "base" : "

The request to update an asset.

", + "refs" : { } + }, + "UpdateDataSetRequest" : { + "base" : "

The request to update a data set.

", + "refs" : { } + }, + "UpdateRevisionRequest" : { + "base" : "

The request to update a revision.

", + "refs" : { } + }, + "ValidationException" : { + "base" : "

The request was invalid.

", + "refs" : { } + }, + "__boolean" : { + "base" : null, + "refs" : { + "Revision$Finalized" : "

To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

", + "RevisionEntry$Finalized" : "

To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

", + "TaggedRevision$Finalized" : "

To publish a revision to a data set in a product, the revision must first be finalized. Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.

Finalized revisions can be published through the AWS Data Exchange console or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace Catalog API action. When using the API, revisions are uniquely identified by their ARN.

", + "UpdateRevisionRequest$Finalized" : "

Finalizing a revision tells AWS Data Exchange that your changes to the assets in the revision are complete. After it's in this read-only state, you can publish the revision to your products.
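A hedged sketch of finalizing a revision through UpdateRevision, reusing the client from the earlier sketches; the identifiers are placeholders.

```go
// Sketch: mark a revision as finalized so it can be published to a product.
// DataSetId and RevisionId are placeholders.
req := client.UpdateRevisionRequest(&dataexchange.UpdateRevisionInput{
	DataSetId:  aws.String("example-data-set-id"),
	RevisionId: aws.String("example-revision-id"),
	Finalized:  aws.Bool(true), // the revision becomes read-only and publishable
})
if _, err := req.Send(context.Background()); err != nil {
	panic(err)
}
```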

" + } + }, + "__double" : { + "base" : null, + "refs" : { + "JobError$LimitValue" : "The value of the exceeded limit.", + "ServiceQuotaExceededException$LimitValue" : "

The maximum value for the service-specific limit.

" + } + }, + "__doubleMin0" : { + "base" : null, + "refs" : { + "S3SnapshotAsset$Size" : "

The size of the S3 object that is the asset.

" + } + }, + "ListOfAssetEntry" : { + "base" : null, + "refs" : { + "ListOfAssets$Assets" : "

The asset objects listed by the request.

" + } + }, + "ListOfDataSetEntry" : { + "base" : null, + "refs" : { + "ListOfDataSets$DataSets" : "

The data set objects listed by the request.

" + } + }, + "ListOfJobEntry" : { + "base" : null, + "refs" : { + "ListOfJobs$Jobs" : "

The jobs listed by the request.

" + } + }, + "ListOfJobError" : { + "base" : null, + "refs" : { + "Job$Errors" : "

The errors associated with jobs.

", + "JobEntry$Errors" : "

Errors for jobs.

" + } + }, + "ListOfRevisionEntry" : { + "base" : null, + "refs" : { + "ListOfRevisions$Revisions" : "

The revision objects listed by the request.

" + } + }, + "MapOf__string" : { + "base" : null, + "refs" : { + "CreateDataSetRequest$Tags" : "

A data set tag is an optional label that you can assign to a data set when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.

", + "CreateRevisionRequest$Tags" : "

A revision tag is an optional label that you can assign to a revision when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to these data sets and revisions.

", + "TaggedDataSet$Tags" : "

The tags for the data set.

", + "TaggedRevision$Tags" : "

The tags for the revision.

", + "TagsModel$Tags" : "A label that consists of a customer-defined key and an optional value." + } + }, + "__string" : { + "base" : null, + "refs" : { + "AccessDeniedException$Message" : "

Access to the resource is denied.

", + "AssetDestinationEntry$Bucket" : "

The S3 bucket that is the destination for the asset.

", + "AssetDestinationEntry$Key" : "

The name of the object in Amazon S3 for the asset.

", + "AssetSourceEntry$Bucket" : "

The S3 bucket that's part of the source of the asset.

", + "AssetSourceEntry$Key" : "

The name of the object in Amazon S3 for the asset.

", + "ConflictException$Message" : "

The request couldn't be completed because it conflicted with the current state of the resource.

", + "ConflictException$ResourceId" : "

The unique identifier for the resource with the conflict.

", + "ExportAssetToSignedUrlResponseDetails$SignedUrl" : "

The signed URL for the export request.

", + "ImportAssetFromSignedUrlResponseDetails$SignedUrl" : "

The signed URL.

", + "InternalServerException$Message" : "The message identifying the service exception that occurred.", + "JobError$Message" : "The message related to the job error.", + "JobError$ResourceId" : "The unqiue identifier for the resource related to the error.", + "OriginDetails$ProductId" : null, + "ResourceNotFoundException$Message" : "

The resource couldn't be found.

", + "ResourceNotFoundException$ResourceId" : "

The unique identifier for the resource that couldn't be found.

", + "ServiceQuotaExceededException$Message" : "

The request has exceeded the quotas imposed by the service.

", + "ThrottlingException$Message" : "

The limit on the number of requests per second was exceeded.

", + "ValidationException$Message" : "

The message that informs you about what was invalid about the request.

", + "MapOf__string$member" : null + } + }, + "__stringMin0Max16384" : { + "base" : null, + "refs" : { + "CreateRevisionRequest$Comment" : "

An optional comment about the revision.

", + "Revision$Comment" : "

An optional comment about the revision.

", + "RevisionEntry$Comment" : "

An optional comment about the revision.

", + "TaggedRevision$Comment" : "

An optional comment about the revision.

", + "UpdateRevisionRequest$Comment" : "

An optional comment about the revision.

" + } + }, + "__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093" : { + "base" : null, + "refs" : { + "ImportAssetFromSignedUrlRequestDetails$Md5Hash" : "

The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.
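For reference, the 24-character value expected here is the standard Base64 encoding of a raw 16-byte MD5 digest, which the Go standard library can produce directly; the file name below is a placeholder.

```go
// Compute the Base64-encoded MD5 hash expected by ImportAssetFromSignedUrl.
// "asset.csv" is a placeholder file name.
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

func main() {
	data, err := ioutil.ReadFile("asset.csv")
	if err != nil {
		panic(err)
	}
	sum := md5.Sum(data) // raw 16-byte MD5 digest
	// Standard Base64 of 16 bytes is always 24 characters, matching the min/max of 24.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```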

", + "ImportAssetFromSignedUrlResponseDetails$Md5Hash" : "

The Base64-encoded Md5 hash for the asset, used to ensure the integrity of the file at that location.

" + } + } + } +} \ No newline at end of file diff --git a/models/apis/dataexchange/2017-07-25/paginators-1.json b/models/apis/dataexchange/2017-07-25/paginators-1.json new file mode 100644 index 00000000000..d76ccf7f38b --- /dev/null +++ b/models/apis/dataexchange/2017-07-25/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListDataSetRevisions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Revisions" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "DataSets" + }, + "ListJobs": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Jobs" + }, + "ListRevisionAssets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Assets" + } + } +} \ No newline at end of file diff --git a/models/apis/datasync/2018-11-09/api-2.json b/models/apis/datasync/2018-11-09/api-2.json index a7c3db8efc0..0906e9121f7 100644 --- a/models/apis/datasync/2018-11-09/api-2.json +++ b/models/apis/datasync/2018-11-09/api-2.json @@ -542,6 +542,7 @@ "Name":{"shape":"TagValue"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "Tags":{"shape":"TagList"} } }, @@ -720,6 +721,7 @@ "DestinationNetworkInterfaceArns":{"shape":"DestinationNetworkInterfaceArns"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "ErrorCode":{"shape":"string"}, "ErrorDetail":{"shape":"string"}, "CreationTime":{"shape":"Time"} @@ -775,7 +777,8 @@ "type":"string", "enum":[ "PUBLIC", - "PRIVATE_LINK" + "PRIVATE_LINK", + "FIPS" ] }, "FilterList":{ @@ -972,7 +975,7 @@ "NonEmptySubdirectory":{ "type":"string", "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\./]+$" + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]+$" }, "OnPremConfig":{ "type":"structure", @@ -1028,7 +1031,6 @@ "type":"string", "enum":[ "NONE", - "BEST_EFFORT", "PRESERVE" ] }, @@ -1078,6 +1080,11 @@ "DEEP_ARCHIVE" ] }, + "ScheduleExpressionCron":{ + "type":"string", + "max":256, + "pattern":"^[a-zA-Z0-9\\ \\_\\*\\?\\,\\|\\^\\-\\/\\#\\s\\(\\)\\+]*$" + }, "ServerHostname":{ "type":"string", "max":255, @@ -1097,12 +1104,13 @@ "SmbPassword":{ "type":"string", "max":104, - "pattern":"^.{0,104}$" + "pattern":"^.{0,104}$", + "sensitive":true }, "SmbUser":{ "type":"string", "max":104, - "pattern":"^[^\\\\x5B\\\\x5D\\\\/:;|=,+*?]{1,104}$" + "pattern":"^[^\\x5B\\x5D\\\\/:;|=,+*?]{1,104}$" }, "SmbVersion":{ "type":"string", @@ -1134,7 +1142,7 @@ "Subdirectory":{ "type":"string", "max":4096, - "pattern":"^[a-zA-Z0-9_\\-\\./]*$" + "pattern":"^[a-zA-Z0-9_\\-\\+\\./\\(\\)\\p{Zs}]*$" }, "TagKey":{ "type":"string", @@ -1215,6 +1223,7 @@ "members":{ "PrepareDuration":{"shape":"Duration"}, "PrepareStatus":{"shape":"PhaseStatus"}, + "TotalDuration":{"shape":"Duration"}, "TransferDuration":{"shape":"Duration"}, "TransferStatus":{"shape":"PhaseStatus"}, "VerifyDuration":{"shape":"Duration"}, @@ -1254,6 +1263,13 @@ "DISABLED" ] }, + "TaskSchedule":{ + "type":"structure", + "required":["ScheduleExpression"], + "members":{ + "ScheduleExpression":{"shape":"ScheduleExpressionCron"} + } + }, "TaskStatus":{ "type":"string", "enum":[ @@ -1310,6 +1326,7 @@ "TaskArn":{"shape":"TaskArn"}, "Options":{"shape":"Options"}, "Excludes":{"shape":"FilterList"}, + "Schedule":{"shape":"TaskSchedule"}, "Name":{"shape":"TagValue"}, 
"CloudWatchLogGroupArn":{"shape":"LogGroupArn"} } diff --git a/models/apis/datasync/2018-11-09/docs-2.json b/models/apis/datasync/2018-11-09/docs-2.json index 2afac0729d2..73231a0ec73 100644 --- a/models/apis/datasync/2018-11-09/docs-2.json +++ b/models/apis/datasync/2018-11-09/docs-2.json @@ -7,7 +7,7 @@ "CreateLocationEfs": "

Creates an endpoint for an Amazon EFS file system.

", "CreateLocationNfs": "

Defines a file system on a Network File System (NFS) server that can be read from or written to

", "CreateLocationS3": "

Creates an endpoint for an Amazon S3 bucket.

For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity and Access Management (IAM) role that has the required permissions. You can set up the required permissions by creating an IAM policy that grants the required permissions and attaching the policy to the role. An example of such a policy is shown in the examples section.

For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location in the AWS DataSync User Guide.

", - "CreateLocationSmb": "

Defines a file system on an Server Message Block (SMB) server that can be read from or written to

", + "CreateLocationSmb": "

Defines a file system on a Server Message Block (SMB) server that can be read from or written to.

", "CreateTask": "

Creates a task. A task is a set of two locations (source and destination) and a set of Options that you use to control the behavior of a task. If you don't specify Options when you create a task, AWS DataSync populates them with service defaults.

When you create a task, it first enters the CREATING state. During CREATING AWS DataSync attempts to mount the on-premises Network File System (NFS) location. The task transitions to the AVAILABLE state without waiting for the AWS location to become mounted. If required, AWS DataSync mounts the AWS location before each task execution.

If an agent that is associated with a source (NFS) location goes offline, the task transitions to the UNAVAILABLE status. If the status of the task remains in the CREATING status for more than a few minutes, it means that your agent might be having trouble mounting the source NFS file system. Check the task's ErrorCode and ErrorDetail. Mount issues are often caused by either a misconfigured firewall or a mistyped NFS server host name.

", "DeleteAgent": "

Deletes an agent. To specify which agent to delete, use the Amazon Resource Name (ARN) of the agent in your request. The operation disassociates the agent from your AWS account. However, it doesn't delete the agent virtual machine (VM) from your on-premises environment.

", "DeleteLocation": "

Deletes the configuration of a location used by AWS DataSync.

", @@ -268,6 +268,7 @@ "base": null, "refs": { "TaskExecutionResultDetail$PrepareDuration": "

The total time in milliseconds that AWS DataSync spent in the PREPARING phase.

", + "TaskExecutionResultDetail$TotalDuration": "

The total time in milliseconds that AWS DataSync took to transfer the file from the source to the destination location.

", "TaskExecutionResultDetail$TransferDuration": "

The total time in milliseconds that AWS DataSync spent in the TRANSFERRING phase.

", "TaskExecutionResultDetail$VerifyDuration": "

The total time in milliseconds that AWS DataSync spent in the VERIFYING phase.

" } @@ -526,7 +527,7 @@ "base": null, "refs": { "CreateLocationNfsRequest$Subdirectory": "

The subdirectory in the NFS file system that is used to read data from the NFS source location or write data to the NFS destination. The NFS path should be a path that's exported by the NFS server, or a subdirectory of that path. The path should be such that it can be mounted by other NFS clients in your network.

To see all the paths exported by your NFS server. run \"showmount -e nfs-server-name\" from an NFS client that has access to your server. You can specify any directory that appears in the results, and any subdirectory of that directory. Ensure that the NFS export is accessible without Kerberos authentication.

To transfer all the data in the folder you specified, DataSync needs to have permissions to read all the data. To ensure this, either configure the NFS export with no_root_squash, or ensure that the permissions for all of the files that you want DataSync allow read access for all users. Doing either enables the agent to read the files. For the agent to access directories, you must additionally enable all execute access.

For information about NFS export configuration, see 18.7. The /etc/exports Configuration File in the Red Hat Enterprise Linux documentation.

", - "CreateLocationSmbRequest$Subdirectory": "

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" + "CreateLocationSmbRequest$Subdirectory": "

The subdirectory in the SMB file system that is used to read data from the SMB source location or write data to the SMB destination. The SMB path should be a path that's exported by the SMB server, or a subdirectory of that path. The path should be such that it can be mounted by other SMB clients in your network.

Subdirectory must be specified with forward slashes. For example, /path/to/folder.

To transfer all the data in the folder you specified, DataSync needs to have permissions to mount the SMB share, as well as to access all the data in that share. To ensure this, either ensure that the user/password specified belongs to the user who can mount the share, and who has the appropriate permissions for all of the files and directories that you want DataSync to access, or use credentials of a member of the Backup Operators group to mount the share. Doing either enables the agent to access the data. For the agent to access directories, you must additionally enable all execute access.

" } }, "OnPremConfig": { @@ -618,6 +619,12 @@ "DescribeLocationS3Response$S3StorageClass": "

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes in the Amazon Simple Storage Service Developer Guide. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see using-storage-classes.

" } }, + "ScheduleExpressionCron": { + "base": null, + "refs": { + "TaskSchedule$ScheduleExpression": "

A cron expression that specifies when AWS DataSync initiates a scheduled transfer from a source to a destination location.

" + } + }, "ServerHostname": { "base": null, "refs": { @@ -677,7 +684,7 @@ "Subdirectory": { "base": null, "refs": { - "CreateLocationEfsRequest$Subdirectory": "

A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.

", + "CreateLocationEfsRequest$Subdirectory": "

A subdirectory in the location’s path. This subdirectory in the EFS file system is used to read data from the EFS source location or write data to the EFS destination. By default, AWS DataSync uses the root directory.

Subdirectory must be specified with forward slashes. For example, /path/to/folder.

", "CreateLocationS3Request$Subdirectory": "

A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.

" } }, @@ -809,7 +816,15 @@ "TaskQueueing": { "base": null, "refs": { - "Options$TaskQueueing": "

A value that determines whether tasks should be queued before executing the tasks. If set to Enabled, the tasks will queued. The default is Enabled.

If you use the same agent to run multiple tasks you can enable the tasks to run in series. For more information see task-queue.

" + "Options$TaskQueueing": "

A value that determines whether tasks should be queued before executing the tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED.

If you use the same agent to run multiple tasks, you can enable the tasks to run in series. For more information, see queue-task-execution.

" + } + }, + "TaskSchedule": { + "base": "

Specifies the schedule you want your task to use for repeated executions. For more information, see Schedule Expressions for Rules.

", + "refs": { + "CreateTaskRequest$Schedule": "

Specifies a schedule used to periodically transfer files from a source to a destination location. The schedule should be specified in UTC time. For more information, see task-scheduling.

", + "DescribeTaskResponse$Schedule": "

The schedule used to periodically transfer files from a source to a destination location.

", + "UpdateTaskRequest$Schedule": "

Specifies a schedule used to periodically transfer files from a source to a destination location. You can configure your task to execute hourly, daily, weekly or on specific days of the week. You control when in the day or hour you want the task to execute. The time you specify is UTC time. For more information, see task-scheduling.
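A hedged sketch of attaching the new Schedule member from Go, reusing an aws.Config loaded as in the earlier dataexchange sketch; the task ARN is a placeholder, and the exact cron syntax accepted is defined by the DataSync documentation rather than this model.

```go
// Sketch: attach a weekly schedule to an existing DataSync task.
// The task ARN and cron expression are placeholders.
dsClient := datasync.New(cfg)
req := dsClient.UpdateTaskRequest(&datasync.UpdateTaskInput{
	TaskArn: aws.String("arn:aws:datasync:us-east-1:111122223333:task/task-example"),
	Schedule: &datasync.TaskSchedule{
		// Intended to run every Sunday at 12:00 UTC; verify the expression format
		// against the DataSync task-scheduling documentation.
		ScheduleExpression: aws.String("cron(0 12 ? * SUN *)"),
	},
})
if _, err := req.Send(context.Background()); err != nil {
	panic(err)
}
```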

" } }, "TaskStatus": { diff --git a/models/apis/discovery/2015-11-01/api-2.json b/models/apis/discovery/2015-11-01/api-2.json index 5197cf8631c..3f12a99c747 100644 --- a/models/apis/discovery/2015-11-01/api-2.json +++ b/models/apis/discovery/2015-11-01/api-2.json @@ -24,7 +24,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "BatchDeleteImportData":{ @@ -37,8 +38,10 @@ "output":{"shape":"BatchDeleteImportDataResponse"}, "errors":[ {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "CreateApplication":{ @@ -53,7 +56,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "CreateTags":{ @@ -69,7 +73,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteApplications":{ @@ -84,7 +89,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DeleteTags":{ @@ -100,7 +106,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeAgents":{ @@ -115,7 +122,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeConfigurations":{ @@ -130,7 +138,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeContinuousExports":{ @@ -147,7 +156,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeExportConfigurations":{ @@ -163,7 +173,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ], "deprecated":true }, @@ -179,7 +190,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + 
{"shape":"HomeRegionNotSetException"} ] }, "DescribeImportTasks":{ @@ -192,8 +204,10 @@ "output":{"shape":"DescribeImportTasksResponse"}, "errors":[ {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DescribeTags":{ @@ -209,7 +223,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "DisassociateConfigurationItemsFromApplication":{ @@ -224,7 +239,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ExportConfigurations":{ @@ -239,7 +255,8 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"HomeRegionNotSetException"} ], "deprecated":true }, @@ -255,7 +272,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListConfigurations":{ @@ -271,7 +289,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "ListServerNeighbors":{ @@ -286,7 +305,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartContinuousExport":{ @@ -304,7 +324,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartDataCollectionByAgentIds":{ @@ -319,7 +340,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartExportTask":{ @@ -335,7 +357,8 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ServerInternalErrorException"}, - {"shape":"OperationNotPermittedException"} + {"shape":"OperationNotPermittedException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StartImportTask":{ @@ -349,8 +372,10 @@ "errors":[ {"shape":"ResourceInUseException"}, {"shape":"AuthorizationErrorException"}, + {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StopContinuousExport":{ @@ -368,7 +393,8 @@ 
{"shape":"ServerInternalErrorException"}, {"shape":"OperationNotPermittedException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"HomeRegionNotSetException"} ] }, "StopDataCollectionByAgentIds":{ @@ -383,7 +409,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] }, "UpdateApplication":{ @@ -398,7 +425,8 @@ {"shape":"AuthorizationErrorException"}, {"shape":"InvalidParameterException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ServerInternalErrorException"} + {"shape":"ServerInternalErrorException"}, + {"shape":"HomeRegionNotSetException"} ] } }, @@ -975,6 +1003,13 @@ "connectorSummary":{"shape":"CustomerConnectorInfo"} } }, + "HomeRegionNotSetException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "exception":true + }, "ImportStatus":{ "type":"string", "enum":[ diff --git a/models/apis/discovery/2015-11-01/docs-2.json b/models/apis/discovery/2015-11-01/docs-2.json index 123fe8c80f0..aef504ac6e5 100644 --- a/models/apis/discovery/2015-11-01/docs-2.json +++ b/models/apis/discovery/2015-11-01/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation:

We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide .

", + "service": "AWS Application Discovery Service

AWS Application Discovery Service helps you plan application migration projects by automatically identifying servers, virtual machines (VMs), software, and software dependencies running in your on-premises data centers. Application Discovery Service also collects application performance data, which can help you assess the outcome of your migration. The data collected by Application Discovery Service is securely retained in an AWS-hosted and managed database in the cloud. You can export the data as a CSV or XML file into your preferred visualization tool or cloud-migration solution to plan your migration. For more information, see AWS Application Discovery Service FAQ.

Application Discovery Service offers two modes of operation: agentless discovery and agent-based discovery.

We recommend that you use agent-based discovery for non-VMware environments and to collect information about software and software dependencies. You can also run agent-based and agentless discovery simultaneously. Use agentless discovery to quickly complete the initial infrastructure assessment and then install agents on select hosts.

Application Discovery Service integrates with application discovery solutions from AWS Partner Network (APN) partners. Third-party application discovery tools can query Application Discovery Service and write to the Application Discovery Service database using a public API. You can then import the data into either a visualization tool or cloud-migration solution.

Application Discovery Service doesn't gather sensitive information. All data is handled according to the AWS Privacy Policy. You can operate Application Discovery Service offline to inspect collected data before it is shared with the service.

This API reference provides descriptions, syntax, and usage examples for each of the actions and data types for Application Discovery Service. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

This guide is intended for use with the AWS Application Discovery Service User Guide.

Remember that you must set your AWS Migration Hub home region before you call any of these APIs, or a HomeRegionNotSetException error is returned. Also, you must make the API calls from within your home region.
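A hedged Go sketch of what handling this new error can look like with the v0.17.0 request/response pattern. The service package name (applicationdiscoveryservice), the DescribeAgents call, and the awserr-based check are assumptions about the generated SDK surface; only the "HomeRegionNotSetException" code itself comes from this model update.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}

	svc := applicationdiscoveryservice.New(cfg)
	req := svc.DescribeAgentsRequest(&applicationdiscoveryservice.DescribeAgentsInput{})
	if _, err := req.Send(context.TODO()); err != nil {
		// "HomeRegionNotSetException" is the error shape added in this model update.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "HomeRegionNotSetException" {
			fmt.Println("set your Migration Hub home region, then retry from that region:", aerr.Message())
			return
		}
		panic(err)
	}
	fmt.Println("DescribeAgents succeeded")
}
```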

", "operations": { "AssociateConfigurationItemsToApplication": "

Associates one or more configuration items with an application.

", "BatchDeleteImportData": "

Deletes one or more import tasks, each identified by their import ID. Each import task has a number of records that can identify servers or applications.

AWS Application Discovery Service has built-in matching logic that identifies when discovered servers match existing entries that you've previously discovered; when a match is found, the information for the already-existing discovered server is updated. When you delete an import task that contains records that were used to match, the information in those matched records that comes from the deleted records will also be deleted.

", @@ -487,7 +487,7 @@ "ExportIds": { "base": null, "refs": { - "DescribeExportConfigurationsRequest$exportIds": "

A list of continuous export ids to search for.

", + "DescribeExportConfigurationsRequest$exportIds": "

A list of continuous export IDs to search for.

", "DescribeExportTasksRequest$exportIds": "

One or more unique identifiers used to query the status of an export request.

" } }, @@ -566,6 +566,11 @@ "refs": { } }, + "HomeRegionNotSetException": { + "base": "

The home region is not set. Set the home region to continue.

", + "refs": { + } + }, "ImportStatus": { "base": null, "refs": { @@ -706,6 +711,7 @@ "refs": { "AuthorizationErrorException$message": null, "ConflictErrorException$message": null, + "HomeRegionNotSetException$message": null, "InvalidParameterException$message": null, "InvalidParameterValueException$message": null, "OperationNotPermittedException$message": null, diff --git a/models/apis/dlm/2018-01-12/api-2.json b/models/apis/dlm/2018-01-12/api-2.json index d5a1dc46da3..daed8661092 100644 --- a/models/apis/dlm/2018-01-12/api-2.json +++ b/models/apis/dlm/2018-01-12/api-2.json @@ -70,6 +70,48 @@ {"shape":"LimitExceededException"} ] }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "UpdateLifecyclePolicy":{ "name":"UpdateLifecyclePolicy", "http":{ @@ -87,6 +129,18 @@ } }, "shapes":{ + "AvailabilityZone":{ + "type":"string", + "max":16, + "min":0, + "pattern":"([a-z]+-){2,3}\\d[a-z]" + }, + "AvailabilityZoneList":{ + "type":"list", + "member":{"shape":"AvailabilityZone"}, + "max":10, + "min":1 + }, "CopyTags":{"type":"boolean"}, "Count":{ "type":"integer", @@ -105,7 +159,8 @@ "ExecutionRoleArn":{"shape":"ExecutionRoleArn"}, "Description":{"shape":"PolicyDescription"}, "State":{"shape":"SettablePolicyStateValues"}, - "PolicyDetails":{"shape":"PolicyDetails"} + "PolicyDetails":{"shape":"PolicyDetails"}, + "Tags":{"shape":"TagMap"} } }, "CreateLifecyclePolicyResponse":{ @@ -145,7 +200,23 @@ "ErrorCode":{"type":"string"}, "ErrorMessage":{"type":"string"}, "ExcludeBootVolume":{"type":"boolean"}, - "ExecutionRoleArn":{"type":"string"}, + "ExecutionRoleArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:aws:iam::\\d+:role/.*" + }, + "FastRestoreRule":{ + "type":"structure", + "required":[ + "Count", + "AvailabilityZones" + ], + "members":{ + "Count":{"shape":"Count"}, + "AvailabilityZones":{"shape":"AvailabilityZoneList"} + } + }, "GetLifecyclePoliciesRequest":{ "type":"structure", "members":{ @@ -241,10 +312,13 @@ "PolicyId":{"shape":"PolicyId"}, "Description":{"shape":"PolicyDescription"}, "State":{"shape":"GettablePolicyStateValues"}, + "StatusMessage":{"shape":"StatusMessage"}, "ExecutionRoleArn":{"shape":"ExecutionRoleArn"}, "DateCreated":{"shape":"Timestamp"}, "DateModified":{"shape":"Timestamp"}, - "PolicyDetails":{"shape":"PolicyDetails"} + "PolicyDetails":{"shape":"PolicyDetails"}, + "Tags":{"shape":"TagMap"}, + "PolicyArn":{"shape":"PolicyArn"} } }, "LifecyclePolicySummary":{ @@ -252,7 +326,8 @@ "members":{ "PolicyId":{"shape":"PolicyId"}, 
"Description":{"shape":"PolicyDescription"}, - "State":{"shape":"GettablePolicyStateValues"} + "State":{"shape":"GettablePolicyStateValues"}, + "Tags":{"shape":"TagMap"} } }, "LifecyclePolicySummaryList":{ @@ -269,6 +344,23 @@ "error":{"httpStatusCode":429}, "exception":true }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagMap"} + } + }, "Parameter":{"type":"string"}, "ParameterList":{ "type":"list", @@ -280,10 +372,15 @@ "ExcludeBootVolume":{"shape":"ExcludeBootVolume"} } }, + "PolicyArn":{ + "type":"string", + "pattern":"^arn:aws:dlm:[A-Za-z0-9_/.-]{0,63}:\\d+:policy/[0-9A-Za-z_-]{1,128}$" + }, "PolicyDescription":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"[0-9A-Za-z _-]+" }, "PolicyDetails":{ "type":"structure", @@ -295,7 +392,12 @@ "Parameters":{"shape":"Parameters"} } }, - "PolicyId":{"type":"string"}, + "PolicyId":{ + "type":"string", + "max":64, + "min":0, + "pattern":"policy-[A-Za-z0-9]+" + }, "PolicyIdList":{ "type":"list", "member":{"shape":"PolicyId"} @@ -343,7 +445,8 @@ "TagsToAdd":{"shape":"TagsToAddList"}, "VariableTags":{"shape":"VariableTagsList"}, "CreateRule":{"shape":"CreateRule"}, - "RetainRule":{"shape":"RetainRule"} + "RetainRule":{"shape":"RetainRule"}, + "FastRestoreRule":{"shape":"FastRestoreRule"} } }, "ScheduleList":{ @@ -355,7 +458,8 @@ "ScheduleName":{ "type":"string", "max":500, - "min":0 + "min":0, + "pattern":"[\\p{all}]*" }, "SettablePolicyStateValues":{ "type":"string", @@ -364,7 +468,18 @@ "DISABLED" ] }, - "String":{"type":"string"}, + "StatusMessage":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[\\p{all}]*" + }, + "String":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[\\p{all}]*" + }, "Tag":{ "type":"structure", "required":[ @@ -376,7 +491,55 @@ "Value":{"shape":"String"} } }, - "TagFilter":{"type":"string"}, + "TagFilter":{ + "type":"string", + "max":256, + "min":0, + "pattern":"[\\p{all}]*" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + }, + "Tags":{"shape":"TagMap"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256 + }, "TagsToAddFilterList":{ "type":"list", "member":{"shape":"TagFilter"}, @@ -403,7 +566,9 @@ }, "Time":{ "type":"string", - "pattern":"^([0-9]|0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" + "max":5, + "min":5, + "pattern":"^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$" }, "TimesList":{ "type":"list", @@ -414,6 +579,30 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"PolicyArn", + "location":"uri", + "locationName":"resourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", 
+ "members":{ + } + }, "UpdateLifecyclePolicyRequest":{ "type":"structure", "required":["PolicyId"], diff --git a/models/apis/dlm/2018-01-12/docs-2.json b/models/apis/dlm/2018-01-12/docs-2.json index e66c6d48e33..0d0992dc412 100644 --- a/models/apis/dlm/2018-01-12/docs-2.json +++ b/models/apis/dlm/2018-01-12/docs-2.json @@ -6,9 +6,24 @@ "DeleteLifecyclePolicy": "

Deletes the specified lifecycle policy and halts the automated operations that the policy specified.

", "GetLifecyclePolicies": "

Gets summary information about all or the specified data lifecycle policies.

To get complete information about a policy, use GetLifecyclePolicy.

", "GetLifecyclePolicy": "

Gets detailed information about the specified lifecycle policy.

", + "ListTagsForResource": "

Lists the tags for the specified resource.

", + "TagResource": "

Adds the specified tags to the specified resource.

", + "UntagResource": "

Removes the specified tags from the specified resource.
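Taken together, these three operations give lifecycle policies standard resource tagging. The hedged Go sketch below tags a policy by ARN and reads the tags back; the input type names and the map[string]string tag type follow the SDK's usual codegen conventions and are assumptions, and the ARN and tag values are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/dlm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := dlm.New(cfg)

	// Placeholder policy ARN matching the PolicyArn pattern in this model.
	arn := "arn:aws:dlm:us-east-2:123456789012:policy/policy-0123456789abcdef0"

	// Add a tag to the policy.
	tagReq := svc.TagResourceRequest(&dlm.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags:        map[string]string{"CostCenter": "12345"},
	})
	if _, err := tagReq.Send(context.TODO()); err != nil {
		panic(err)
	}

	// Read the tags back.
	listReq := svc.ListTagsForResourceRequest(&dlm.ListTagsForResourceInput{
		ResourceArn: aws.String(arn),
	})
	listResp, err := listReq.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(listResp.Tags)
}
```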

", "UpdateLifecyclePolicy": "

Updates the specified lifecycle policy.

" }, "shapes": { + "AvailabilityZone": { + "base": null, + "refs": { + "AvailabilityZoneList$member": null + } + }, + "AvailabilityZoneList": { + "base": null, + "refs": { + "FastRestoreRule$AvailabilityZones": "

The Availability Zones in which to enable fast snapshot restore.

" + } + }, "CopyTags": { "base": null, "refs": { @@ -18,6 +33,7 @@ "Count": { "base": null, "refs": { + "FastRestoreRule$Count": "

The number of snapshots to be enabled with fast snapshot restore.

", "RetainRule$Count": "

The number of snapshots to keep for each volume, up to a maximum of 1000.

" } }, @@ -79,6 +95,12 @@ "UpdateLifecyclePolicyRequest$ExecutionRoleArn": "

The Amazon Resource Name (ARN) of the IAM role used to run the operations specified by the lifecycle policy.

" } }, + "FastRestoreRule": { + "base": "

Specifies when to enable fast snapshot restore.

", + "refs": { + "Schedule$FastRestoreRule": "

Enable fast snapshot restore.
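To show where this rule sits, here is a hedged Go sketch that attaches a FastRestoreRule to a schedule via UpdateLifecyclePolicy. The Go type and field names (dlm.Schedule, dlm.FastRestoreRule, UpdateLifecyclePolicyInput) follow the usual codegen conventions and are assumptions; the policy ID and Availability Zones are placeholders.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/dlm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := dlm.New(cfg)

	// Enable fast snapshot restore for the two most recent snapshots of the
	// schedule, in two Availability Zones (all identifiers are placeholders).
	req := svc.UpdateLifecyclePolicyRequest(&dlm.UpdateLifecyclePolicyInput{
		PolicyId: aws.String("policy-0123456789abcdef0"),
		PolicyDetails: &dlm.PolicyDetails{
			Schedules: []dlm.Schedule{{
				Name: aws.String("DailySnapshots"),
				FastRestoreRule: &dlm.FastRestoreRule{
					Count:             aws.Int64(2),
					AvailabilityZones: []string{"us-east-2a", "us-east-2b"},
				},
			}},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		panic(err)
	}
}
```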

" + } + }, "GetLifecyclePoliciesRequest": { "base": null, "refs": { @@ -152,6 +174,16 @@ "refs": { } }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, "Parameter": { "base": null, "refs": { @@ -171,6 +203,15 @@ "PolicyDetails$Parameters": "

A set of optional parameters that can be provided by the policy.

" } }, + "PolicyArn": { + "base": null, + "refs": { + "LifecyclePolicy$PolicyArn": "

The Amazon Resource Name (ARN) of the policy.

", + "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource.

", + "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource.

", + "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) of the resource.

" + } + }, "PolicyDescription": { "base": null, "refs": { @@ -183,9 +224,9 @@ "PolicyDetails": { "base": "

Specifies the configuration of a lifecycle policy.

", "refs": { - "CreateLifecyclePolicyRequest$PolicyDetails": "

The configuration details of the lifecycle policy.

Target tags cannot be re-used across lifecycle policies.

", + "CreateLifecyclePolicyRequest$PolicyDetails": "

The configuration details of the lifecycle policy.

", "LifecyclePolicy$PolicyDetails": "

The configuration of the lifecycle policy.

", - "UpdateLifecyclePolicyRequest$PolicyDetails": "

The configuration of the lifecycle policy.

Target tags cannot be re-used across policies.

" + "UpdateLifecyclePolicyRequest$PolicyDetails": "

The configuration of the lifecycle policy. You cannot update the policy type or the resource type.

" } }, "PolicyId": { @@ -262,6 +303,12 @@ "UpdateLifecyclePolicyRequest$State": "

The desired activation state of the lifecycle policy after creation.

" } }, + "StatusMessage": { + "base": null, + "refs": { + "LifecyclePolicy$StatusMessage": "

The description of the status.

" + } + }, "String": { "base": null, "refs": { @@ -286,6 +333,45 @@ "TargetTagsFilterList$member": null } }, + "TagKey": { + "base": null, + "refs": { + "TagKeyList$member": null, + "TagMap$key": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "

The tag keys.

" + } + }, + "TagMap": { + "base": null, + "refs": { + "CreateLifecyclePolicyRequest$Tags": "

The tags to apply to the lifecycle policy during creation.

", + "LifecyclePolicy$Tags": "

The tags.

", + "LifecyclePolicySummary$Tags": "

The tags.

", + "ListTagsForResourceResponse$Tags": "

Information about the tags.

", + "TagResourceRequest$Tags": "

One or more tags.

" + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "TagMap$value": null + } + }, "TagsToAddFilterList": { "base": null, "refs": { @@ -329,6 +415,16 @@ "LifecyclePolicy$DateModified": "

The local date and time when the lifecycle policy was last modified.

" } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, "UpdateLifecyclePolicyRequest": { "base": null, "refs": { diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index b28bd1ab00a..29830f6d43d 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -1358,6 +1358,15 @@ "input":{"shape":"DescribeExportTasksRequest"}, "output":{"shape":"DescribeExportTasksResult"} }, + "DescribeFastSnapshotRestores":{ + "name":"DescribeFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFastSnapshotRestoresRequest"}, + "output":{"shape":"DescribeFastSnapshotRestoresResult"} + }, "DescribeFleetHistory":{ "name":"DescribeFleetHistory", "http":{ @@ -2120,6 +2129,15 @@ "input":{"shape":"DisableEbsEncryptionByDefaultRequest"}, "output":{"shape":"DisableEbsEncryptionByDefaultResult"} }, + "DisableFastSnapshotRestores":{ + "name":"DisableFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisableFastSnapshotRestoresRequest"}, + "output":{"shape":"DisableFastSnapshotRestoresResult"} + }, "DisableTransitGatewayRouteTablePropagation":{ "name":"DisableTransitGatewayRouteTablePropagation", "http":{ @@ -2225,6 +2243,15 @@ "input":{"shape":"EnableEbsEncryptionByDefaultRequest"}, "output":{"shape":"EnableEbsEncryptionByDefaultResult"} }, + "EnableFastSnapshotRestores":{ + "name":"EnableFastSnapshotRestores", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EnableFastSnapshotRestoresRequest"}, + "output":{"shape":"EnableFastSnapshotRestoresResult"} + }, "EnableTransitGatewayRouteTablePropagation":{ "name":"EnableTransitGatewayRouteTablePropagation", "http":{ @@ -2579,6 +2606,15 @@ "input":{"shape":"ModifyInstanceEventStartTimeRequest"}, "output":{"shape":"ModifyInstanceEventStartTimeResult"} }, + "ModifyInstanceMetadataOptions":{ + "name":"ModifyInstanceMetadataOptions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyInstanceMetadataOptionsRequest"}, + "output":{"shape":"ModifyInstanceMetadataOptionsResult"} + }, "ModifyInstancePlacement":{ "name":"ModifyInstancePlacement", "http":{ @@ -4293,6 +4329,13 @@ "unavailable" ] }, + "AvailabilityZoneStringList":{ + "type":"list", + "member":{ + "shape":"String", + "locationName":"AvailabilityZone" + } + }, "AvailableCapacity":{ "type":"structure", "members":{ @@ -5634,7 +5677,7 @@ "locationName":"encrypted" }, "KmsKeyId":{ - "shape":"KmsKeyId", + "shape":"String", "locationName":"kmsKeyId" }, "PresignedUrl":{ @@ -5643,6 +5686,10 @@ }, "SourceRegion":{"shape":"String"}, "SourceSnapshotId":{"shape":"String"}, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "locationName":"dryRun" @@ -5655,6 +5702,10 @@ "SnapshotId":{ "shape":"String", "locationName":"snapshotId" + }, + "Tags":{ + "shape":"TagList", + "locationName":"tagSet" } } }, @@ -5805,6 +5856,7 @@ }, "CertificateArn":{"shape":"String"}, "Type":{"shape":"GatewayType"}, + "DeviceName":{"shape":"String"}, "DryRun":{ "shape":"Boolean", "locationName":"dryRun" @@ -7217,6 +7269,10 @@ "shape":"String", "locationName":"type" }, + "DeviceName":{ + "shape":"String", + "locationName":"deviceName" + }, "Tags":{ "shape":"TagList", "locationName":"tagSet" @@ -8760,6 +8816,92 @@ } } }, + "DescribeFastSnapshotRestoreSuccessItem":{ + 
"type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "DescribeFastSnapshotRestoreSuccessSet":{ + "type":"list", + "member":{ + "shape":"DescribeFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "DescribeFastSnapshotRestoresMaxResults":{ + "type":"integer", + "max":200, + "min":0 + }, + "DescribeFastSnapshotRestoresRequest":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"FilterList", + "locationName":"Filter" + }, + "MaxResults":{"shape":"DescribeFastSnapshotRestoresMaxResults"}, + "NextToken":{"shape":"NextToken"}, + "DryRun":{"shape":"Boolean"} + } + }, + "DescribeFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "FastSnapshotRestores":{ + "shape":"DescribeFastSnapshotRestoreSuccessSet", + "locationName":"fastSnapshotRestoreSet" + }, + "NextToken":{ + "shape":"NextToken", + "locationName":"nextToken" + } + } + }, "DescribeFleetError":{ "type":"structure", "members":{ @@ -11519,6 +11661,146 @@ } } }, + "DisableFastSnapshotRestoreErrorItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "FastSnapshotRestoreStateErrors":{ + "shape":"DisableFastSnapshotRestoreStateErrorSet", + "locationName":"fastSnapshotRestoreStateErrorSet" + } + } + }, + "DisableFastSnapshotRestoreErrorSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreErrorItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoreStateError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "DisableFastSnapshotRestoreStateErrorItem":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Error":{ + "shape":"DisableFastSnapshotRestoreStateError", + "locationName":"error" + } + } + }, + "DisableFastSnapshotRestoreStateErrorSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreStateErrorItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoreSuccessItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + 
"shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "DisableFastSnapshotRestoreSuccessSet":{ + "type":"list", + "member":{ + "shape":"DisableFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "DisableFastSnapshotRestoresRequest":{ + "type":"structure", + "required":[ + "AvailabilityZones", + "SourceSnapshotIds" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneStringList", + "locationName":"AvailabilityZone" + }, + "SourceSnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SourceSnapshotId" + }, + "DryRun":{"shape":"Boolean"} + } + }, + "DisableFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"DisableFastSnapshotRestoreSuccessSet", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"DisableFastSnapshotRestoreErrorSet", + "locationName":"unsuccessful" + } + } + }, "DisableTransitGatewayRouteTablePropagationRequest":{ "type":"structure", "required":[ @@ -12121,6 +12403,146 @@ } } }, + "EnableFastSnapshotRestoreErrorItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "FastSnapshotRestoreStateErrors":{ + "shape":"EnableFastSnapshotRestoreStateErrorSet", + "locationName":"fastSnapshotRestoreStateErrorSet" + } + } + }, + "EnableFastSnapshotRestoreErrorSet":{ + "type":"list", + "member":{ + "shape":"EnableFastSnapshotRestoreErrorItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoreStateError":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"String", + "locationName":"code" + }, + "Message":{ + "shape":"String", + "locationName":"message" + } + } + }, + "EnableFastSnapshotRestoreStateErrorItem":{ + "type":"structure", + "members":{ + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "Error":{ + "shape":"EnableFastSnapshotRestoreStateError", + "locationName":"error" + } + } + }, + "EnableFastSnapshotRestoreStateErrorSet":{ + "type":"list", + "member":{ + "shape":"EnableFastSnapshotRestoreStateErrorItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoreSuccessItem":{ + "type":"structure", + "members":{ + "SnapshotId":{ + "shape":"String", + "locationName":"snapshotId" + }, + "AvailabilityZone":{ + "shape":"String", + "locationName":"availabilityZone" + }, + "State":{ + "shape":"FastSnapshotRestoreStateCode", + "locationName":"state" + }, + "StateTransitionReason":{ + "shape":"String", + "locationName":"stateTransitionReason" + }, + "OwnerId":{ + "shape":"String", + "locationName":"ownerId" + }, + "OwnerAlias":{ + "shape":"String", + "locationName":"ownerAlias" + }, + "EnablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"enablingTime" + }, + "OptimizingTime":{ + "shape":"MillisecondDateTime", + "locationName":"optimizingTime" + }, + "EnabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"enabledTime" + }, + "DisablingTime":{ + "shape":"MillisecondDateTime", + "locationName":"disablingTime" + }, + "DisabledTime":{ + "shape":"MillisecondDateTime", + "locationName":"disabledTime" + } + } + }, + "EnableFastSnapshotRestoreSuccessSet":{ + "type":"list", + 
"member":{ + "shape":"EnableFastSnapshotRestoreSuccessItem", + "locationName":"item" + } + }, + "EnableFastSnapshotRestoresRequest":{ + "type":"structure", + "required":[ + "AvailabilityZones", + "SourceSnapshotIds" + ], + "members":{ + "AvailabilityZones":{ + "shape":"AvailabilityZoneStringList", + "locationName":"AvailabilityZone" + }, + "SourceSnapshotIds":{ + "shape":"SnapshotIdStringList", + "locationName":"SourceSnapshotId" + }, + "DryRun":{"shape":"Boolean"} + } + }, + "EnableFastSnapshotRestoresResult":{ + "type":"structure", + "members":{ + "Successful":{ + "shape":"EnableFastSnapshotRestoreSuccessSet", + "locationName":"successful" + }, + "Unsuccessful":{ + "shape":"EnableFastSnapshotRestoreErrorSet", + "locationName":"unsuccessful" + } + } + }, "EnableTransitGatewayRouteTablePropagationRequest":{ "type":"structure", "required":[ @@ -12584,6 +13006,16 @@ "locationName":"item" } }, + "FastSnapshotRestoreStateCode":{ + "type":"string", + "enum":[ + "enabling", + "optimizing", + "enabled", + "disabling", + "disabled" + ] + }, "Filter":{ "type":"structure", "members":{ @@ -13767,6 +14199,13 @@ "host" ] }, + "HttpTokensState":{ + "type":"string", + "enum":[ + "optional", + "required" + ] + }, "HypervisorType":{ "type":"string", "enum":[ @@ -14759,6 +15198,10 @@ "Licenses":{ "shape":"LicenseList", "locationName":"licenseSet" + }, + "MetadataOptions":{ + "shape":"InstanceMetadataOptionsResponse", + "locationName":"metadataOptions" } } }, @@ -15080,6 +15523,49 @@ "targeted" ] }, + "InstanceMetadataEndpointState":{ + "type":"string", + "enum":[ + "disabled", + "enabled" + ] + }, + "InstanceMetadataOptionsRequest":{ + "type":"structure", + "members":{ + "HttpTokens":{"shape":"HttpTokensState"}, + "HttpPutResponseHopLimit":{"shape":"Integer"}, + "HttpEndpoint":{"shape":"InstanceMetadataEndpointState"} + } + }, + "InstanceMetadataOptionsResponse":{ + "type":"structure", + "members":{ + "State":{ + "shape":"InstanceMetadataOptionsState", + "locationName":"state" + }, + "HttpTokens":{ + "shape":"HttpTokensState", + "locationName":"httpTokens" + }, + "HttpPutResponseHopLimit":{ + "shape":"Integer", + "locationName":"httpPutResponseHopLimit" + }, + "HttpEndpoint":{ + "shape":"InstanceMetadataEndpointState", + "locationName":"httpEndpoint" + } + } + }, + "InstanceMetadataOptionsState":{ + "type":"string", + "enum":[ + "pending", + "applied" + ] + }, "InstanceMonitoring":{ "type":"structure", "members":{ @@ -17189,6 +17675,30 @@ } } }, + "ModifyInstanceMetadataOptionsRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"String"}, + "HttpTokens":{"shape":"HttpTokensState"}, + "HttpPutResponseHopLimit":{"shape":"Integer"}, + "HttpEndpoint":{"shape":"InstanceMetadataEndpointState"}, + "DryRun":{"shape":"Boolean"} + } + }, + "ModifyInstanceMetadataOptionsResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"String", + "locationName":"instanceId" + }, + "InstanceMetadataOptions":{ + "shape":"InstanceMetadataOptionsResponse", + "locationName":"instanceMetadataOptions" + } + } + }, "ModifyInstancePlacementRequest":{ "type":"structure", "required":["InstanceId"], @@ -21180,7 +21690,8 @@ "LicenseSpecifications":{ "shape":"LicenseSpecificationListRequest", "locationName":"LicenseSpecification" - } + }, + "MetadataOptions":{"shape":"InstanceMetadataOptionsRequest"} } }, "RunScheduledInstancesRequest":{ @@ -24586,6 +25097,10 @@ "VolumeType":{ "shape":"VolumeType", "locationName":"volumeType" + }, + "FastRestored":{ + "shape":"Boolean", + 
"locationName":"fastRestored" } } }, diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 9b37c6bd719..11257145ca7 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -44,7 +44,7 @@ "CreateCapacityReservation": "

Creates a new Capacity Reservation with the specified attributes.

Capacity Reservations enable you to reserve capacity for your Amazon EC2 instances in a specific Availability Zone for any duration. This gives you the flexibility to selectively add capacity reservations and still get the Regional RI discounts for that usage. By creating Capacity Reservations, you ensure that you always have access to Amazon EC2 capacity when you need it, for as long as you need it. For more information, see Capacity Reservations in the Amazon Elastic Compute Cloud User Guide.

Your request to create a Capacity Reservation could fail if Amazon EC2 does not have sufficient capacity to fulfill the request. If your request fails due to Amazon EC2 capacity constraints, either try again at a later time, try in a different Availability Zone, or request a smaller capacity reservation. If your application is flexible across instance types and sizes, try to create a Capacity Reservation with different instance attributes.

Your request could also fail if the requested quantity exceeds your On-Demand Instance limit for the selected instance type. If your request fails due to limit constraints, increase your On-Demand Instance limit for the required instance type and try again. For more information about increasing your instance limits, see Amazon EC2 Service Limits in the Amazon Elastic Compute Cloud User Guide.

", "CreateClientVpnEndpoint": "

Creates a Client VPN endpoint. A Client VPN endpoint is the resource you create and configure to enable and manage client VPN sessions. It is the destination endpoint at which all client VPN sessions are terminated.

", "CreateClientVpnRoute": "

Adds a route to a network to a Client VPN endpoint. Each Client VPN endpoint has a route table that describes the available destination network routes. Each route in the route table specifies the path for traffic to specific resources or networks.

", - "CreateCustomerGateway": "

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

You cannot create more than one customer gateway with the same VPN type, IP address, and BGP ASN parameter values. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.

", + "CreateCustomerGateway": "

Provides information to AWS about your VPN customer gateway device. The customer gateway is the appliance at your end of the VPN connection. (The device on the AWS side of the VPN connection is the virtual private gateway.) You must provide the Internet-routable IP address of the customer gateway's external interface. The IP address must be static and can be behind a device performing network address translation (NAT).

For devices that use Border Gateway Protocol (BGP), you can also provide the device's BGP Autonomous System Number (ASN). You can use an existing ASN assigned to your network. If you don't have an ASN already, you can use a private ASN (in the 64512 - 65534 range).

Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with the exception of 7224, which is reserved in the us-east-1 Region, and 9059, which is reserved in the eu-west-1 Region.

For more information, see AWS Site-to-Site VPN in the AWS Site-to-Site VPN User Guide.

To create more than one customer gateway with the same VPN type, IP address, and BGP ASN, specify a unique device name for each customer gateway. Identical requests return information about the existing customer gateway and do not create new customer gateways.
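A hedged Go sketch of a CreateCustomerGateway call that uses the new DeviceName member to distinguish an otherwise identical gateway. The enum constant and field names follow the SDK's usual codegen conventions and are assumptions; the ASN, IP address, and device name are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := ec2.New(cfg)

	// A second gateway with the same type, IP address, and ASN is distinguished
	// by the new DeviceName member (all values here are placeholders).
	req := svc.CreateCustomerGatewayRequest(&ec2.CreateCustomerGatewayInput{
		BgpAsn:     aws.Int64(65000),
		PublicIp:   aws.String("203.0.113.12"),
		Type:       ec2.GatewayTypeIpsec1,
		DeviceName: aws.String("office-router-2"),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(*resp.CustomerGateway.CustomerGatewayId)
}
```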

", "CreateDefaultSubnet": "

Creates a default subnet with a size /20 IPv4 CIDR block in the specified Availability Zone in your default VPC. You can have only one default subnet per Availability Zone. For more information, see Creating a Default Subnet in the Amazon Virtual Private Cloud User Guide.

", "CreateDefaultVpc": "

Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in each Availability Zone. For more information about the components of a default VPC, see Default VPC and Default Subnets in the Amazon Virtual Private Cloud User Guide. You cannot specify the components of the default VPC yourself.

If you deleted your previous default VPC, you can create a default VPC. You cannot have more than one default VPC per Region.

If your account supports EC2-Classic, you cannot use this action to create a default VPC in a Region that supports EC2-Classic. If you want a default VPC in a Region that supports EC2-Classic, see \"I really want a default VPC for my existing EC2 account. Is that possible?\" in the Default VPCs FAQ.

", "CreateDhcpOptions": "

Creates a set of DHCP options for your VPC. After creating the set, you must associate it with the VPC, causing all existing and new instances that you launch in the VPC to use this set of DHCP options. The following are the individual DHCP options you can specify. For more information about the options, see RFC 2132.

Your VPC automatically starts out with a set of DHCP options that includes only a DNS server that we provide (AmazonProvidedDNS). If you create a set of options, and if your VPC has an internet gateway, make sure to set the domain-name-servers option either to AmazonProvidedDNS or to a domain name server of your choice. For more information, see DHCP Options Sets in the Amazon Virtual Private Cloud User Guide.

", @@ -155,6 +155,7 @@ "DescribeElasticGpus": "

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

", "DescribeExportImageTasks": "

Describes the specified export image tasks or all your export image tasks.

", "DescribeExportTasks": "

Describes the specified export instance tasks or all your export instance tasks.

", + "DescribeFastSnapshotRestores": "

Describes the state of fast snapshot restores for your snapshots.

", "DescribeFleetHistory": "

Describes the events for the specified EC2 Fleet during the specified time.

", "DescribeFleetInstances": "

Describes the running instances for the specified EC2 Fleet.

", "DescribeFleets": "

Describes the specified EC2 Fleets or all your EC2 Fleets.

", @@ -240,6 +241,7 @@ "DetachVolume": "

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an AWS Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detaching an Amazon EBS Volume in the Amazon Elastic Compute Cloud User Guide.

", "DetachVpnGateway": "

Detaches a virtual private gateway from a VPC. You do this if you're planning to turn off the VPC and not use it anymore. You can confirm a virtual private gateway has been completely detached from a VPC by describing the virtual private gateway (any attachments to the virtual private gateway are also described).

You must wait for the attachment's state to switch to detached before you can delete the VPC or attach a different VPC to the virtual private gateway.

", "DisableEbsEncryptionByDefault": "

Disables EBS encryption by default for your account in the current Region.

After you disable encryption by default, you can still create encrypted volumes by enabling encryption when you create each volume.

Disabling encryption by default does not change the encryption status of your existing volumes.

For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "DisableFastSnapshotRestores": "

Disables fast snapshot restores for the specified snapshots in the specified Availability Zones.

", "DisableTransitGatewayRouteTablePropagation": "

Disables the specified resource attachment from propagating routes to the specified propagation route table.

", "DisableVgwRoutePropagation": "

Disables a virtual private gateway (VGW) from propagating routes to a specified route table of a VPC.

", "DisableVpcClassicLink": "

Disables ClassicLink for a VPC. You cannot disable ClassicLink for a VPC that has EC2-Classic instances linked to it.

", @@ -252,6 +254,7 @@ "DisassociateTransitGatewayRouteTable": "

Disassociates a resource attachment from a transit gateway route table.

", "DisassociateVpcCidrBlock": "

Disassociates a CIDR block from a VPC. To disassociate the CIDR block, you must specify its association ID. You can get the association ID by using DescribeVpcs. You must detach or delete all gateways and resources that are associated with the CIDR block before you can disassociate it.

You cannot disassociate the CIDR block with which you originally created the VPC (the primary CIDR block).

", "EnableEbsEncryptionByDefault": "

Enables EBS encryption by default for your account in the current Region.

After you enable encryption by default, the EBS volumes that you create are always encrypted, either using the default CMK or the CMK that you specified when you created each volume. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

You can specify the default CMK for encryption by default using ModifyEbsDefaultKmsKeyId or ResetEbsDefaultKmsKeyId.

Enabling encryption by default has no effect on the encryption status of your existing volumes.

After you enable encryption by default, you can no longer launch instances using instance types that do not support encryption. For more information, see Supported Instance Types.

", + "EnableFastSnapshotRestores": "

Enables fast snapshot restores for the specified snapshots in the specified Availability Zones.

You get the full benefit of fast snapshot restores after they enter the enabled state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. To disable fast snapshot restores, use DisableFastSnapshotRestores.
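A hedged Go sketch of the enable-then-check flow described here. The input and field names come from the shapes in this model update, while the request/response method names follow the v0.17.0 codegen conventions; the snapshot ID and Availability Zone are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := ec2.New(cfg)

	// Enable fast snapshot restore for one snapshot in one Availability Zone.
	enableReq := svc.EnableFastSnapshotRestoresRequest(&ec2.EnableFastSnapshotRestoresInput{
		AvailabilityZones: []string{"us-east-2a"},
		SourceSnapshotIds: []string{"snap-0123456789abcdef0"},
	})
	if _, err := enableReq.Send(context.TODO()); err != nil {
		panic(err)
	}

	// Check the current state; it moves through enabling/optimizing to enabled.
	descReq := svc.DescribeFastSnapshotRestoresRequest(&ec2.DescribeFastSnapshotRestoresInput{})
	descResp, err := descReq.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	for _, fsr := range descResp.FastSnapshotRestores {
		fmt.Println(*fsr.SnapshotId, *fsr.AvailabilityZone, fsr.State)
	}
}
```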

", "EnableTransitGatewayRouteTablePropagation": "

Enables the specified attachment to propagate routes to the specified propagation route table.

", "EnableVgwRoutePropagation": "

Enables a virtual private gateway (VGW) to propagate routes to the specified route table of a VPC.

", "EnableVolumeIO": "

Enables I/O operations for a volume that had I/O operations disabled because the data on the volume was potentially inconsistent.

", @@ -292,6 +295,7 @@ "ModifyInstanceCapacityReservationAttributes": "

Modifies the Capacity Reservation settings for a stopped instance. Use this action to configure an instance to target a specific Capacity Reservation, run in any open Capacity Reservation with matching attributes, or run On-Demand Instance capacity.

", "ModifyInstanceCreditSpecification": "

Modifies the credit option for CPU usage on a running or stopped T2 or T3 instance. The credit options are standard and unlimited.

For more information, see Burstable Performance Instances in the Amazon Elastic Compute Cloud User Guide.

", "ModifyInstanceEventStartTime": "

Modifies the start time for a scheduled Amazon EC2 instance event.

", + "ModifyInstanceMetadataOptions": "

Modifies the instance metadata parameters on a running or stopped instance. When you modify the parameters on a stopped instance, they are applied when the instance is started. When you modify the parameters on a running instance, the API responds with a state of \"pending\". After the parameter modifications are successfully applied to the instance, the state of the modifications changes from \"pending\" to \"applied\" in subsequent describe-instances API calls. For more information, see Instance Metadata and User Data.
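A hedged Go sketch requiring session tokens (IMDSv2) on a single instance via the new operation. The enum constant name and the request/response pattern follow the SDK's usual codegen conventions and are assumptions; the instance ID is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic(err)
	}
	svc := ec2.New(cfg)

	// Require session tokens for instance metadata requests on one instance.
	req := svc.ModifyInstanceMetadataOptionsRequest(&ec2.ModifyInstanceMetadataOptionsInput{
		InstanceId: aws.String("i-0123456789abcdef0"),
		HttpTokens: ec2.HttpTokensStateRequired,
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic(err)
	}

	// The change reports "pending" until it is applied to the instance.
	fmt.Println(resp.InstanceMetadataOptions.State)
}
```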

", "ModifyInstancePlacement": "

Modifies the placement attributes for a specified instance. You can do the following:

At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.

To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state.

", "ModifyLaunchTemplate": "

Modifies a launch template. You can specify which version of the launch template to set as the default version. When launching an instance, the default version applies when a launch template version is not specified.

", "ModifyNetworkInterfaceAttribute": "

Modifies the specified network interface attribute. You can specify only one attribute at a time. You can use this action to attach and detach security groups from an existing EC2 instance.

", @@ -734,7 +738,7 @@ } }, "AttachVolumeRequest": { - "base": "

Contains the parameters for AttachVolume.

", + "base": null, "refs": { } }, @@ -889,6 +893,13 @@ "AvailabilityZone$State": "

The state of the Availability Zone.

" } }, + "AvailabilityZoneStringList": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresRequest$AvailabilityZones": "

One or more Availability Zones. For example, us-east-2a.

", + "EnableFastSnapshotRestoresRequest$AvailabilityZones": "

One or more Availability Zones. For example, us-east-2a.

" + } + }, "AvailableCapacity": { "base": "

The capacity information for instances launched onto the Dedicated Host.

", "refs": { @@ -1039,7 +1050,7 @@ "CreateRouteTableRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateSecurityGroupRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateSnapshotRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "CreateSnapshotsRequest$DryRun": "

Checks whether you have the required permissions for the action without actually making the request. Provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "CreateSnapshotsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateSpotDatafeedSubscriptionRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateSubnetRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateTagsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -1134,6 +1145,7 @@ "DescribeEgressOnlyInternetGatewaysRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DescribeElasticGpusRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DescribeExportImageTasksRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "DescribeFastSnapshotRestoresRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DescribeFleetHistoryRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DescribeFleetInstancesRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DescribeFleetsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -1212,8 +1224,9 @@ "DetachVolumeRequest$Force": "

Forces detachment if the previous detachment attempt did not occur cleanly (for example, logging into an instance, unmounting the volume, and detaching normally). This option can lead to data loss or a corrupted file system. Use this option only as a last resort to detach a volume from a failed instance. The instance won't have an opportunity to flush file system caches or file system metadata. If you use this option, you must perform file system check and repair procedures.

", "DetachVolumeRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DetachVpnGatewayRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "DisableEbsEncryptionByDefaultRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "DisableEbsEncryptionByDefaultRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DisableEbsEncryptionByDefaultResult$EbsEncryptionByDefault": "

The updated status of encryption by default.

", + "DisableFastSnapshotRestoresRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DisableTransitGatewayRouteTablePropagationRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "DisableVpcClassicLinkDnsSupportResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", "DisableVpcClassicLinkRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -1227,8 +1240,9 @@ "EbsBlockDevice$Encrypted": "

Indicates whether the encryption state of an EBS volume is changed while being restored from a backing snapshot. The effect of setting the encryption state to true depends on the volume origin (new or from a snapshot), starting encryption state, ownership, and whether encryption by default is enabled. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

In no case can you remove encryption from an encrypted volume.

Encrypted volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types.

", "EbsInstanceBlockDevice$DeleteOnTermination": "

Indicates whether the volume is deleted on instance termination.

", "EbsInstanceBlockDeviceSpecification$DeleteOnTermination": "

Indicates whether the volume is deleted on instance termination.

", - "EnableEbsEncryptionByDefaultRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "EnableEbsEncryptionByDefaultRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "EnableEbsEncryptionByDefaultResult$EbsEncryptionByDefault": "

The updated status of encryption by default.

", + "EnableFastSnapshotRestoresRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "EnableTransitGatewayRouteTablePropagationRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "EnableVolumeIORequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "EnableVpcClassicLinkDnsSupportResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", @@ -1314,6 +1328,7 @@ "ModifyInstanceCapacityReservationAttributesResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", "ModifyInstanceCreditSpecificationRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ModifyInstanceEventStartTimeRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "ModifyInstanceMetadataOptionsRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ModifyInstancePlacementResult$Return": "

Returns true if the request succeeds; otherwise, it returns an error.

", "ModifyLaunchTemplateRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "ModifyNetworkInterfaceAttributeRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -1453,6 +1468,7 @@ "UpdateSecurityGroupRuleDescriptionsIngressRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "UpdateSecurityGroupRuleDescriptionsIngressResult$Return": "

Returns true if the request succeeds; otherwise, returns an error.

", "Volume$Encrypted": "

Indicates whether the volume is encrypted.

", + "Volume$FastRestored": "

Indicates whether the volume was created using fast snapshot restore.

", "VolumeAttachment$DeleteOnTermination": "

Indicates whether the EBS volume is deleted on instance termination.

", "Vpc$IsDefault": "

Indicates whether the VPC is the default VPC.

", "VpcClassicLink$ClassicLinkEnabled": "

Indicates whether the VPC is enabled for ClassicLink.

", @@ -2079,12 +2095,12 @@ } }, "CopySnapshotRequest": { - "base": "

Contains the parameters for CopySnapshot.

", + "base": null, "refs": { } }, "CopySnapshotResult": { - "base": "

Contains the output of CopySnapshot.

", + "base": null, "refs": { } }, @@ -2386,7 +2402,7 @@ } }, "CreateSnapshotRequest": { - "base": "

Contains the parameters for CreateSnapshot.

", + "base": null, "refs": { } }, @@ -2532,7 +2548,7 @@ } }, "CreateVolumeRequest": { - "base": "

Contains the parameters for CreateVolume.

", + "base": null, "refs": { } }, @@ -3040,7 +3056,7 @@ } }, "DeleteSnapshotRequest": { - "base": "

Contains the parameters for DeleteSnapshot.

", + "base": null, "refs": { } }, @@ -3140,7 +3156,7 @@ } }, "DeleteVolumeRequest": { - "base": "

Contains the parameters for DeleteVolume.

", + "base": null, "refs": { } }, @@ -3497,6 +3513,34 @@ "refs": { } }, + "DescribeFastSnapshotRestoreSuccessItem": { + "base": "

Describes fast snapshot restores for a snapshot.

", + "refs": { + "DescribeFastSnapshotRestoreSuccessSet$member": null + } + }, + "DescribeFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoresResult$FastSnapshotRestores": "

Information about the state of fast snapshot restores.

" + } + }, + "DescribeFastSnapshotRestoresMaxResults": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoresRequest$MaxResults": "

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.
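Since DescribeFastSnapshotRestores pages its output through the MaxResults and NextToken members described here (a paginator entry is also added in paginators-1.json later in this diff), a manual pagination loop might look like the following sketch; the field names are inferred from the request and result shapes in this model.

```go
// Hypothetical sketch of manual NextToken pagination for
// DescribeFastSnapshotRestores; names follow the shapes in this model update.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(cfg)

	var nextToken *string
	for {
		req := svc.DescribeFastSnapshotRestoresRequest(&ec2.DescribeFastSnapshotRestoresInput{
			MaxResults: aws.Int64(50), // page size; the rest arrives via NextToken
			NextToken:  nextToken,
		})
		resp, err := req.Send(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, fsr := range resp.FastSnapshotRestores {
			fmt.Println(fsr)
		}
		if resp.NextToken == nil {
			break // a null NextToken means there are no more results
		}
		nextToken = resp.NextToken
	}
}
```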

" + } + }, + "DescribeFastSnapshotRestoresRequest": { + "base": null, + "refs": { + } + }, + "DescribeFastSnapshotRestoresResult": { + "base": null, + "refs": { + } + }, "DescribeFleetError": { "base": "

Describes the instances that could not be launched by the fleet.

", "refs": { @@ -4042,12 +4086,12 @@ } }, "DescribeSnapshotAttributeRequest": { - "base": "

Contains the parameters for DescribeSnapshotAttribute.

", + "base": null, "refs": { } }, "DescribeSnapshotAttributeResult": { - "base": "

Contains the output of DescribeSnapshotAttribute.

", + "base": null, "refs": { } }, @@ -4252,12 +4296,12 @@ } }, "DescribeVolumeAttributeRequest": { - "base": "

Contains the parameters for DescribeVolumeAttribute.

", + "base": null, "refs": { } }, "DescribeVolumeAttributeResult": { - "base": "

Contains the output of DescribeVolumeAttribute.

", + "base": null, "refs": { } }, @@ -4467,7 +4511,7 @@ } }, "DetachVolumeRequest": { - "base": "

Contains the parameters for DetachVolume.

", + "base": null, "refs": { } }, @@ -4549,6 +4593,58 @@ "refs": { } }, + "DisableFastSnapshotRestoreErrorItem": { + "base": "

Contains information about the errors that occurred when disabling fast snapshot restores.

", + "refs": { + "DisableFastSnapshotRestoreErrorSet$member": null + } + }, + "DisableFastSnapshotRestoreErrorSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresResult$Unsuccessful": "

Information about the snapshots for which fast snapshot restores could not be disabled.

" + } + }, + "DisableFastSnapshotRestoreStateError": { + "base": "

Describes an error that occurred when disabling fast snapshot restores.

", + "refs": { + "DisableFastSnapshotRestoreStateErrorItem$Error": "

The error.

" + } + }, + "DisableFastSnapshotRestoreStateErrorItem": { + "base": "

Contains information about an error that occurred when disabling fast snapshot restores.

", + "refs": { + "DisableFastSnapshotRestoreStateErrorSet$member": null + } + }, + "DisableFastSnapshotRestoreStateErrorSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoreErrorItem$FastSnapshotRestoreStateErrors": "

The errors.

" + } + }, + "DisableFastSnapshotRestoreSuccessItem": { + "base": "

Describes fast snapshot restores that were successfully disabled.

", + "refs": { + "DisableFastSnapshotRestoreSuccessSet$member": null + } + }, + "DisableFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "DisableFastSnapshotRestoresResult$Successful": "

Information about the snapshots for which fast snapshot restores were successfully disabled.

" + } + }, + "DisableFastSnapshotRestoresRequest": { + "base": null, + "refs": { + } + }, + "DisableFastSnapshotRestoresResult": { + "base": null, + "refs": { + } + }, "DisableTransitGatewayRouteTablePropagationRequest": { "base": null, "refs": { @@ -4890,7 +4986,7 @@ "ElasticInferenceAcceleratorAssociationList": { "base": null, "refs": { - "Instance$ElasticInferenceAcceleratorAssociations": "

The elastic inference accelerator associated with the instance.

" + "Instance$ElasticInferenceAcceleratorAssociations": "

The elastic inference accelerator associated with the instance.

" } }, "ElasticInferenceAccelerators": { @@ -4909,6 +5005,58 @@ "refs": { } }, + "EnableFastSnapshotRestoreErrorItem": { + "base": "

Contains information about the errors that occurred when enabling fast snapshot restores.

", + "refs": { + "EnableFastSnapshotRestoreErrorSet$member": null + } + }, + "EnableFastSnapshotRestoreErrorSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoresResult$Unsuccessful": "

Information about the snapshots for which fast snapshot restores could not be enabled.

" + } + }, + "EnableFastSnapshotRestoreStateError": { + "base": "

Describes an error that occurred when enabling fast snapshot restores.

", + "refs": { + "EnableFastSnapshotRestoreStateErrorItem$Error": "

The error.

" + } + }, + "EnableFastSnapshotRestoreStateErrorItem": { + "base": "

Contains information about an error that occurred when enabling fast snapshot restores.

", + "refs": { + "EnableFastSnapshotRestoreStateErrorSet$member": null + } + }, + "EnableFastSnapshotRestoreStateErrorSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoreErrorItem$FastSnapshotRestoreStateErrors": "

The errors.

" + } + }, + "EnableFastSnapshotRestoreSuccessItem": { + "base": "

Describes fast snapshot restores that were successfully enabled.

", + "refs": { + "EnableFastSnapshotRestoreSuccessSet$member": null + } + }, + "EnableFastSnapshotRestoreSuccessSet": { + "base": null, + "refs": { + "EnableFastSnapshotRestoresResult$Successful": "

Information about the snapshots for which fast snapshot restores were successfully enabled.

" + } + }, + "EnableFastSnapshotRestoresRequest": { + "base": null, + "refs": { + } + }, + "EnableFastSnapshotRestoresResult": { + "base": null, + "refs": { + } + }, "EnableTransitGatewayRouteTablePropagationRequest": { "base": null, "refs": { @@ -4925,7 +5073,7 @@ } }, "EnableVolumeIORequest": { - "base": "

Contains the parameters for EnableVolumeIO.

", + "base": null, "refs": { } }, @@ -5129,6 +5277,14 @@ "DeleteQueuedReservedInstancesResult$FailedQueuedPurchaseDeletions": "

Information about the queued purchases that could not be deleted.

" } }, + "FastSnapshotRestoreStateCode": { + "base": null, + "refs": { + "DescribeFastSnapshotRestoreSuccessItem$State": "

The state of fast snapshot restores.

", + "DisableFastSnapshotRestoreSuccessItem$State": "

The state of fast snapshot restores for the snapshot.

", + "EnableFastSnapshotRestoreSuccessItem$State": "

The state of fast snapshot restores.

" + } + }, "Filter": { "base": "

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. The filters supported by a describe operation are documented with the describe operation. For example:
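As a hedged illustration of the name/values pair described above, the sketch below builds a single tag filter for DescribeInstances, one of the describe operations whose Filters member appears in this file; the tag key and value are placeholders, and the slice and field types are assumed to follow the v0.x code-generation conventions.

```go
// Hypothetical sketch: a name/values Filter applied to DescribeInstances.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(cfg)

	req := svc.DescribeInstancesRequest(&ec2.DescribeInstancesInput{
		Filters: []ec2.Filter{
			{
				// Match instances by the value of their Name tag.
				Name:   aws.String("tag:Name"),
				Values: []string{"web-*"},
			},
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("matched %d reservations\n", len(resp.Reservations))
}
```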

", "refs": { @@ -5152,6 +5308,7 @@ "DescribeDhcpOptionsRequest$Filters": "

One or more filters.

", "DescribeElasticGpusRequest$Filters": "

The filters.

", "DescribeExportImageTasksRequest$Filters": "

Filter tasks using the task-state filter and one of the following values: active, completed, deleting, or deleted.

", + "DescribeFastSnapshotRestoresRequest$Filters": "

The filters. The possible values are:

", "DescribeFleetInstancesRequest$Filters": "

The filters.

", "DescribeFleetsRequest$Filters": "

The filters.

", "DescribeFlowLogsRequest$Filter": "

One or more filters.

", @@ -5165,7 +5322,7 @@ "DescribeImportSnapshotTasksRequest$Filters": "

The filters.

", "DescribeInstanceCreditSpecificationsRequest$Filters": "

The filters.

", "DescribeInstanceStatusRequest$Filters": "

The filters.

", - "DescribeInstancesRequest$Filters": "

The filters.

", + "DescribeInstancesRequest$Filters": "

The filters.

", "DescribeInternetGatewaysRequest$Filters": "

One or more filters.

", "DescribeKeyPairsRequest$Filters": "

The filters.

", "DescribeLaunchTemplateVersionsRequest$Filters": "

One or more filters.

", @@ -5210,7 +5367,7 @@ "DescribeVpcEndpointsRequest$Filters": "

One or more filters.

", "DescribeVpcPeeringConnectionsRequest$Filters": "

One or more filters.

", "DescribeVpcsRequest$Filters": "

One or more filters.

", - "DescribeVpnConnectionsRequest$Filters": "

One or more filters.

", + "DescribeVpnConnectionsRequest$Filters": "

One or more filters.

", "DescribeVpnGatewaysRequest$Filters": "

One or more filters.

", "ExportTransitGatewayRoutesRequest$Filters": "

One or more filters. The possible values are:

", "GetTransitGatewayAttachmentPropagationsRequest$Filters": "

One or more filters. The possible values are:

", @@ -5739,6 +5896,14 @@ "ModifyInstancePlacementRequest$Tenancy": "

The tenancy for the instance.

" } }, + "HttpTokensState": { + "base": null, + "refs": { + "InstanceMetadataOptionsRequest$HttpTokens": "

The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional.

If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.

If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.

", + "InstanceMetadataOptionsResponse$HttpTokens": "

The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional.

If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.

If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.

", + "ModifyInstanceMetadataOptionsRequest$HttpTokens": "

The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional.

If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned.

If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.
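Taken together, the HttpTokens, HttpEndpoint, and HttpPutResponseHopLimit members can be applied to a running instance through the new ModifyInstanceMetadataOptions operation. The sketch below is illustrative only; the enum constants and field names are assumed to follow the usual generation of the HttpTokensState and InstanceMetadataEndpointState shapes above, and the instance ID is a placeholder.

```go
// Hypothetical sketch: require signed (version 2.0) instance metadata tokens
// on an existing instance. Enum constants and field names are inferred from
// the HttpTokensState / InstanceMetadataEndpointState shapes in this model.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(cfg)

	req := svc.ModifyInstanceMetadataOptionsRequest(&ec2.ModifyInstanceMetadataOptionsInput{
		InstanceId:              aws.String("i-1234567890abcdef0"),      // example instance ID
		HttpTokens:              ec2.HttpTokensStateRequired,            // reject unsigned (1.0) requests
		HttpEndpoint:            ec2.InstanceMetadataEndpointStateEnabled,
		HttpPutResponseHopLimit: aws.Int64(1),                           // keep tokens on the instance itself
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.InstanceMetadataOptions.State)
}
```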

" + } + }, "HypervisorType": { "base": null, "refs": { @@ -6258,6 +6423,33 @@ "CreateCapacityReservationRequest$InstanceMatchCriteria": "

Indicates the type of instance launches that the Capacity Reservation accepts. The options include:

Default: open

" } }, + "InstanceMetadataEndpointState": { + "base": null, + "refs": { + "InstanceMetadataOptionsRequest$HttpEndpoint": "

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.

", + "InstanceMetadataOptionsResponse$HttpEndpoint": "

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

If you specify a value of disabled, you will not be able to access your instance metadata.

", + "ModifyInstanceMetadataOptionsRequest$HttpEndpoint": "

This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the existing state is maintained.

If you specify a value of disabled, you will not be able to access your instance metadata.

" + } + }, + "InstanceMetadataOptionsRequest": { + "base": "

The metadata options for the instance.

", + "refs": { + "RunInstancesRequest$MetadataOptions": "

The metadata options for the instance. For more information, see Instance Metadata and User Data.

" + } + }, + "InstanceMetadataOptionsResponse": { + "base": "

The metadata options for the instance.

", + "refs": { + "Instance$MetadataOptions": "

The metadata options for the instance.

", + "ModifyInstanceMetadataOptionsResult$InstanceMetadataOptions": "

The metadata options for the instance.

" + } + }, + "InstanceMetadataOptionsState": { + "base": null, + "refs": { + "InstanceMetadataOptionsResponse$State": "

The state of the metadata option changes.

pending - The metadata options are being updated and the instance is not ready to process metadata traffic with the new selection.

applied - The metadata options have been successfully applied on the instance.

" + } + }, "InstanceMonitoring": { "base": "

Describes the monitoring of an instance.

", "refs": { @@ -6527,6 +6719,8 @@ "InstanceCapacity$AvailableCapacity": "

The number of instances that can still be launched onto the Dedicated Host.

", "InstanceCapacity$TotalCapacity": "

The total number of instances that can be launched onto the Dedicated Host.

", "InstanceCount$InstanceCount": "

The number of listed Reserved Instances in the state specified by the state.

", + "InstanceMetadataOptionsRequest$HttpPutResponseHopLimit": "

The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.

Default: 1

Possible values: Integers from 1 to 64

", + "InstanceMetadataOptionsResponse$HttpPutResponseHopLimit": "

The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel.

Default: 1

Possible values: Integers from 1 to 64

", "InstanceNetworkInterfaceAttachment$DeviceIndex": "

The index of the device on the instance for the network interface attachment.

", "InstanceNetworkInterfaceSpecification$DeviceIndex": "

The position of the network interface in the attachment order. A primary network interface has a device index of 0.

If you specify a network interface when launching an instance, you must specify the device index.

", "InstanceNetworkInterfaceSpecification$Ipv6AddressCount": "

A number of IPv6 addresses to assign to the network interface. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.

", @@ -6552,6 +6746,7 @@ "LaunchTemplateSpotMarketOptions$BlockDurationMinutes": "

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

", "LaunchTemplateSpotMarketOptionsRequest$BlockDurationMinutes": "

The required duration for the Spot Instances (also known as Spot blocks), in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).

", "ModifyCapacityReservationRequest$InstanceCount": "

The number of instances for which to reserve capacity.

", + "ModifyInstanceMetadataOptionsRequest$HttpPutResponseHopLimit": "

The desired HTTP PUT response hop limit for instance metadata requests. The larger the number, the further instance metadata requests can travel. If no parameter is specified, the existing state is maintained.

Possible values: Integers from 1 to 64

", "ModifyInstancePlacementRequest$PartitionNumber": "

Reserved for future use.

", "ModifySpotFleetRequestRequest$TargetCapacity": "

The size of the fleet.

", "ModifySpotFleetRequestRequest$OnDemandTargetCapacity": "

The number of On-Demand Instances in the fleet.

", @@ -6834,7 +7029,6 @@ "base": null, "refs": { "CopyImageRequest$KmsKeyId": "

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix it with \"alias/\". For example:

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

", - "CopySnapshotRequest$KmsKeyId": "

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", "CreateVolumeRequest$KmsKeyId": "

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.
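As a small illustration of the rule above (specifying KmsKeyId implies Encrypted must be true), a CreateVolume call that names the CMK by alias might look like the following sketch; the Availability Zone and alias are placeholders, not values from this diff.

```go
// Hypothetical sketch: CreateVolume with a customer managed CMK specified by
// alias. Per the documentation above, Encrypted must be set when KmsKeyId is.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(cfg)

	req := svc.CreateVolumeRequest(&ec2.CreateVolumeInput{
		AvailabilityZone: aws.String("us-east-1a"),
		Size:             aws.Int64(100),                 // GiB
		Encrypted:        aws.Bool(true),                 // required when KmsKeyId is set
		KmsKeyId:         aws.String("alias/my-ebs-key"), // example alias; a key ID or ARN also works
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*resp.VolumeId)
}
```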

", "ImportImageRequest$KmsKeyId": "

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted AMI. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the AMI is being copied to.

", "ImportSnapshotRequest$KmsKeyId": "

An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted snapshot. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, the Encrypted flag must also be set.

The CMK identifier may be provided in any of the following formats:

AWS parses KmsKeyId asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.

The specified CMK must exist in the Region that the snapshot is being copied to.

", @@ -7323,6 +7517,21 @@ "MillisecondDateTime": { "base": null, "refs": { + "DescribeFastSnapshotRestoreSuccessItem$EnablingTime": "

The time at which fast snapshot restores entered the enabling state.

", + "DescribeFastSnapshotRestoreSuccessItem$OptimizingTime": "

The time at which fast snapshot restores entered the optimizing state.

", + "DescribeFastSnapshotRestoreSuccessItem$EnabledTime": "

The time at which fast snapshot restores entered the enabled state.

", + "DescribeFastSnapshotRestoreSuccessItem$DisablingTime": "

The time at which fast snapshot restores entered the disabling state.

", + "DescribeFastSnapshotRestoreSuccessItem$DisabledTime": "

The time at which fast snapshot restores entered the disabled state.

", + "DisableFastSnapshotRestoreSuccessItem$EnablingTime": "

The time at which fast snapshot restores entered the enabling state.

", + "DisableFastSnapshotRestoreSuccessItem$OptimizingTime": "

The time at which fast snapshot restores entered the optimizing state.

", + "DisableFastSnapshotRestoreSuccessItem$EnabledTime": "

The time at which fast snapshot restores entered the enabled state.

", + "DisableFastSnapshotRestoreSuccessItem$DisablingTime": "

The time at which fast snapshot restores entered the disabling state.

", + "DisableFastSnapshotRestoreSuccessItem$DisabledTime": "

The time at which fast snapshot restores entered the disabled state.

", + "EnableFastSnapshotRestoreSuccessItem$EnablingTime": "

The time at which fast snapshot restores entered the enabling state.

", + "EnableFastSnapshotRestoreSuccessItem$OptimizingTime": "

The time at which fast snapshot restores entered the optimizing state.

", + "EnableFastSnapshotRestoreSuccessItem$EnabledTime": "

The time at which fast snapshot restores entered the enabled state.

", + "EnableFastSnapshotRestoreSuccessItem$DisablingTime": "

The time at which fast snapshot restores entered the disabling state.

", + "EnableFastSnapshotRestoreSuccessItem$DisabledTime": "

The time at which fast snapshot restores entered the disabled state.

", "SnapshotInfo$StartTime": "

The time this snapshot was started. This is the same for all snapshots initiated by the same request.

", "VpcEndpoint$CreationTimestamp": "

The date and time the VPC endpoint was created.

", "VpcEndpointConnection$CreationTimestamp": "

The date and time the VPC endpoint was created.

" @@ -7438,6 +7647,16 @@ "refs": { } }, + "ModifyInstanceMetadataOptionsRequest": { + "base": null, + "refs": { + } + }, + "ModifyInstanceMetadataOptionsResult": { + "base": null, + "refs": { + } + }, "ModifyInstancePlacementRequest": { "base": null, "refs": { @@ -7474,7 +7693,7 @@ } }, "ModifySnapshotAttributeRequest": { - "base": "

Contains the parameters for ModifySnapshotAttribute.

", + "base": null, "refs": { } }, @@ -7540,7 +7759,7 @@ } }, "ModifyVolumeAttributeRequest": { - "base": "

Contains the parameters for ModifyVolumeAttribute.

", + "base": null, "refs": { } }, @@ -7964,6 +8183,8 @@ "DescribeClientVpnTargetNetworksResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeExportImageTasksRequest$NextToken": "

A token that indicates the next page of results.

", "DescribeExportImageTasksResult$NextToken": "

The token to use to get the next page of results. This value is null when there are no more results to return.

", + "DescribeFastSnapshotRestoresRequest$NextToken": "

The token for the next page of results.

", + "DescribeFastSnapshotRestoresResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeFpgaImagesRequest$NextToken": "

The token to retrieve the next page of results.

", "DescribeFpgaImagesResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeIamInstanceProfileAssociationsRequest$NextToken": "

The token to request the next page of results.

", @@ -9030,7 +9251,7 @@ } }, "ResetSnapshotAttributeRequest": { - "base": "

Contains the parameters for ResetSnapshotAttribute.

", + "base": null, "refs": { } }, @@ -9558,7 +9779,9 @@ "SnapshotIdStringList": { "base": null, "refs": { - "DescribeSnapshotsRequest$SnapshotIds": "

The snapshot IDs.

Default: Describes the snapshots for which you have create volume permissions.

" + "DescribeSnapshotsRequest$SnapshotIds": "

The snapshot IDs.

Default: Describes the snapshots for which you have create volume permissions.

", + "DisableFastSnapshotRestoresRequest$SourceSnapshotIds": "

The IDs of one or more snapshots. For example, snap-1234567890abcdef0.

", + "EnableFastSnapshotRestoresRequest$SourceSnapshotIds": "

The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You can specify a snapshot that was shared with you from another AWS account.
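A hedged sketch of the new enable flow, using the SourceSnapshotIds and AvailabilityZones members and reading back the Successful and Unsuccessful sets defined above; the snapshot ID is the example value quoted in the documentation string, and the zones are placeholders.

```go
// Hypothetical sketch: enable fast snapshot restore for one snapshot in two
// Availability Zones, then report per-snapshot successes and failures.
// Field names are inferred from the EnableFastSnapshotRestores shapes above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ec2.New(cfg)

	req := svc.EnableFastSnapshotRestoresRequest(&ec2.EnableFastSnapshotRestoresInput{
		SourceSnapshotIds: []string{"snap-1234567890abcdef0"}, // example ID from the docs above
		AvailabilityZones: []string{"us-east-1a", "us-east-1b"},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range resp.Successful {
		fmt.Printf("enabling %s in %s: state %s\n", *s.SnapshotId, *s.AvailabilityZone, s.State)
	}
	for _, u := range resp.Unsuccessful {
		fmt.Printf("could not enable %s\n", *u.SnapshotId)
	}
}
```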

" } }, "SnapshotInfo": { @@ -9912,6 +10135,7 @@ "AvailabilityZone$ZoneName": "

The name of the Availability Zone.

", "AvailabilityZone$ZoneId": "

The ID of the Availability Zone.

", "AvailabilityZoneMessage$Message": "

The message about the Availability Zone.

", + "AvailabilityZoneStringList$member": null, "BillingProductList$member": null, "BlockDeviceMapping$DeviceName": "

The device name (for example, /dev/sdh or xvdh).

", "BlockDeviceMapping$VirtualName": "

The virtual device name (ephemeralN). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.

NVMe instance store volumes are automatically enumerated and assigned a device name. Including them in your block device mapping has no effect.

Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.

", @@ -10013,6 +10237,7 @@ "CopyImageResult$ImageId": "

The ID of the new AMI.

", "CopySnapshotRequest$Description": "

A description for the EBS snapshot.

", "CopySnapshotRequest$DestinationRegion": "

The destination Region to use in the PresignedUrl parameter of a snapshot copy operation. This parameter is only valid for specifying the destination Region in a PresignedUrl parameter, where it is required.

The snapshot copy is sent to the regional endpoint that you sent the HTTP request to (for example, ec2.us-east-1.amazonaws.com). With the AWS CLI, this is specified using the --region parameter or the default Region in your AWS configuration file.

", + "CopySnapshotRequest$KmsKeyId": "

The identifier of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use for Amazon EBS encryption. If this parameter is not specified, your AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state must be true.

You can specify the CMK using any of the following:

AWS authenticates the CMK asynchronously. Therefore, if you specify an ID, alias, or ARN that is not valid, the action can appear to complete, but eventually fails.

", "CopySnapshotRequest$PresignedUrl": "

When you copy an encrypted source snapshot using the Amazon EC2 Query API, you must supply a pre-signed URL. This parameter is optional for unencrypted snapshots. For more information, see Query Requests.

The PresignedUrl should use the snapshot source endpoint, the CopySnapshot action, and include the SourceRegion, SourceSnapshotId, and DestinationRegion parameters. The PresignedUrl must be signed using AWS Signature Version 4. Because EBS snapshots are stored in Amazon S3, the signing algorithm for this parameter uses the same logic that is described in Authenticating Requests by Using Query Parameters (AWS Signature Version 4) in the Amazon Simple Storage Service API Reference. An invalid or improperly signed PresignedUrl will cause the copy operation to fail asynchronously, and the snapshot will move to an error state.

", "CopySnapshotRequest$SourceRegion": "

The ID of the Region that contains the snapshot to be copied.

", "CopySnapshotRequest$SourceSnapshotId": "

The ID of the EBS snapshot to copy.

", @@ -10033,6 +10258,7 @@ "CreateClientVpnRouteRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", "CreateCustomerGatewayRequest$PublicIp": "

The Internet-routable IP address for the customer gateway's outside interface. The address must be static.

", "CreateCustomerGatewayRequest$CertificateArn": "

The Amazon Resource Name (ARN) for the customer gateway certificate.

", + "CreateCustomerGatewayRequest$DeviceName": "

A name for the customer gateway device.

Length Constraints: Up to 255 characters.

", "CreateDefaultSubnetRequest$AvailabilityZone": "

The Availability Zone in which to create the default subnet.

", "CreateEgressOnlyInternetGatewayRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to Ensure Idempotency.

", "CreateEgressOnlyInternetGatewayResult$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", @@ -10125,6 +10351,7 @@ "CustomerGateway$CertificateArn": "

The Amazon Resource Name (ARN) for the customer gateway certificate.

", "CustomerGateway$State": "

The current state of the customer gateway (pending | available | deleting | deleted).

", "CustomerGateway$Type": "

The type of VPN connection the customer gateway supports (ipsec.1).

", + "CustomerGateway$DeviceName": "

The name of the customer gateway device.

", "CustomerGatewayIdStringList$member": null, "DeleteClientVpnRouteRequest$TargetVpcSubnetId": "

The ID of the target subnet used by the route.

", "DeleteClientVpnRouteRequest$DestinationCidrBlock": "

The IPv4 address range, in CIDR notation, of the route to be deleted.

", @@ -10166,6 +10393,11 @@ "DescribeEgressOnlyInternetGatewaysResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeElasticGpusRequest$NextToken": "

The token to request the next page of results.

", "DescribeElasticGpusResult$NextToken": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", + "DescribeFastSnapshotRestoreSuccessItem$SnapshotId": "

The ID of the snapshot.

", + "DescribeFastSnapshotRestoreSuccessItem$AvailabilityZone": "

The Availability Zone.

", + "DescribeFastSnapshotRestoreSuccessItem$StateTransitionReason": "

The reason for the state transition. The possible values are as follows:

", + "DescribeFastSnapshotRestoreSuccessItem$OwnerId": "

The ID of the AWS account that owns the snapshot.

", + "DescribeFastSnapshotRestoreSuccessItem$OwnerAlias": "

The alias of the snapshot owner.

", "DescribeFleetError$ErrorCode": "

The error code that indicates why the instance could not be launched. For more information about error codes, see Error Codes.

", "DescribeFleetError$ErrorMessage": "

The error message that describes why the instance could not be launched. For more information about error messages, see Error Codes.

", "DescribeFleetHistoryRequest$NextToken": "

The token for the next set of results.

", @@ -10310,6 +10542,15 @@ "DhcpOptionsIdStringList$member": null, "DirectoryServiceAuthentication$DirectoryId": "

The ID of the Active Directory used for authentication.

", "DirectoryServiceAuthenticationRequest$DirectoryId": "

The ID of the Active Directory to be used for authentication.

", + "DisableFastSnapshotRestoreErrorItem$SnapshotId": "

The ID of the snapshot.

", + "DisableFastSnapshotRestoreStateError$Code": "

The error code.

", + "DisableFastSnapshotRestoreStateError$Message": "

The error message.

", + "DisableFastSnapshotRestoreStateErrorItem$AvailabilityZone": "

The Availability Zone.

", + "DisableFastSnapshotRestoreSuccessItem$SnapshotId": "

The ID of the snapshot.

", + "DisableFastSnapshotRestoreSuccessItem$AvailabilityZone": "

The Availability Zone.

", + "DisableFastSnapshotRestoreSuccessItem$StateTransitionReason": "

The reason for the state transition. The possible values are as follows:

", + "DisableFastSnapshotRestoreSuccessItem$OwnerId": "

The ID of the AWS account that owns the snapshot.

", + "DisableFastSnapshotRestoreSuccessItem$OwnerAlias": "

The alias of the snapshot owner.

", "DisassociateAddressRequest$AssociationId": "

[EC2-VPC] The association ID. Required for EC2-VPC.

", "DisassociateAddressRequest$PublicIp": "

[EC2-Classic] The Elastic IP address. Required for EC2-Classic.

", "DisassociateClientVpnTargetNetworkRequest$AssociationId": "

The ID of the target network association.

", @@ -10343,6 +10584,15 @@ "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorArn": "

The Amazon Resource Name (ARN) of the elastic inference accelerator.

", "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorAssociationId": "

The ID of the association.

", "ElasticInferenceAcceleratorAssociation$ElasticInferenceAcceleratorAssociationState": "

The state of the elastic inference accelerator.

", + "EnableFastSnapshotRestoreErrorItem$SnapshotId": "

The ID of the snapshot.

", + "EnableFastSnapshotRestoreStateError$Code": "

The error code.

", + "EnableFastSnapshotRestoreStateError$Message": "

The error message.

", + "EnableFastSnapshotRestoreStateErrorItem$AvailabilityZone": "

The Availability Zone.

", + "EnableFastSnapshotRestoreSuccessItem$SnapshotId": "

The ID of the snapshot.

", + "EnableFastSnapshotRestoreSuccessItem$AvailabilityZone": "

The Availability Zone.

", + "EnableFastSnapshotRestoreSuccessItem$StateTransitionReason": "

The reason for the state transition. The possible values are as follows:

", + "EnableFastSnapshotRestoreSuccessItem$OwnerId": "

The ID of the AWS account that owns the snapshot.

", + "EnableFastSnapshotRestoreSuccessItem$OwnerAlias": "

The alias of the snapshot owner.

", "EventInformation$EventDescription": "

The description of the event.

", "EventInformation$EventSubType": "

The event.

The following are the error events:

The following are the fleetRequestChange events:

The following are the instanceChange events:

The following are the Information events:

", "EventInformation$InstanceId": "

The ID of the instance. This information is available only for instanceChange events.

", @@ -10690,6 +10940,8 @@ "ModifyInstanceAttributeRequest$Value": "

A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute.

", "ModifyInstanceCreditSpecificationRequest$ClientToken": "

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", "ModifyInstanceEventStartTimeRequest$InstanceEventId": "

The ID of the event whose date and time you are modifying.

", + "ModifyInstanceMetadataOptionsRequest$InstanceId": "

The ID of the instance.

", + "ModifyInstanceMetadataOptionsResult$InstanceId": "

The ID of the instance.

", "ModifyLaunchTemplateRequest$ClientToken": "

Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency.

Constraint: Maximum 128 ASCII characters.

", "ModifyLaunchTemplateRequest$DefaultVersion": "

The version number of the launch template to set as the default version.

", "ModifyReservedInstancesRequest$ClientToken": "

A unique, case-sensitive token you provide to ensure idempotency of your modification request. For more information, see Ensuring Idempotency.

", @@ -11383,6 +11635,7 @@ "ClassicLinkInstance$Tags": "

Any tags assigned to the instance.

", "ClientVpnEndpoint$Tags": "

Any tags assigned to the Client VPN endpoint.

", "ConversionTask$Tags": "

Any tags assigned to the task.

", + "CopySnapshotResult$Tags": "

Any tags applied to the new snapshot.

", "CreateTagsRequest$Tags": "

The tags. The value parameter is required, but if you don't want the tag to have a value, specify the parameter with no value, and we set the value to an empty string.

", "CustomerGateway$Tags": "

Any tags assigned to the customer gateway.

", "DeleteTagsRequest$Tags": "

The tags to delete. Specify a tag key and an optional tag value to delete specific tags. If you specify a tag key without a tag value, we delete any tag with this key regardless of its value. If you specify a tag key with an empty string as the tag value, we delete the tag only if its value is an empty string.

If you omit this parameter, we delete all user-defined tags for the specified resources. We do not delete AWS-generated tags (tags that have the aws: prefix).

", @@ -11438,6 +11691,7 @@ "base": null, "refs": { "AllocateHostsRequest$TagSpecifications": "

The tags to apply to the Dedicated Host during creation.

", + "CopySnapshotRequest$TagSpecifications": "

The tags to apply to the new snapshot.
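The new TagSpecifications member lets the copy be tagged in the same request, and the Tags member added to CopySnapshotResult returns what was applied. A rough sketch follows; the snapshot IDs and tag values are placeholders, and the snapshot resource type constant is assumed from the usual ResourceType enum generation.

```go
// Hypothetical sketch: copy a snapshot and tag the copy in the same call,
// using the new CopySnapshotRequest TagSpecifications member.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	cfg.Region = "us-west-2" // destination Region; the copy request is sent here
	svc := ec2.New(cfg)

	req := svc.CopySnapshotRequest(&ec2.CopySnapshotInput{
		SourceRegion:     aws.String("us-east-1"),
		SourceSnapshotId: aws.String("snap-1234567890abcdef0"), // example ID
		Description:      aws.String("cross-region copy"),
		TagSpecifications: []ec2.TagSpecification{{
			ResourceType: ec2.ResourceTypeSnapshot, // assumed constant for the "snapshot" resource type
			Tags:         []ec2.Tag{{Key: aws.String("Name"), Value: aws.String("backup-copy")}},
		}},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*resp.SnapshotId, resp.Tags)
}
```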

", "CreateCapacityReservationRequest$TagSpecifications": "

The tags to apply to the Capacity Reservation during launch.

", "CreateClientVpnEndpointRequest$TagSpecifications": "

The tags to apply to the Client VPN endpoint during creation.

", "CreateFleetRequest$TagSpecifications": "

The key-value pair for tagging the EC2 Fleet request on creation. The value for ResourceType must be fleet, otherwise the fleet request fails. To tag instances at launch, specify the tags in the launch template. For information about tagging after launch, see Tagging Your Resources.

", diff --git a/models/apis/ec2/2016-11-15/paginators-1.json b/models/apis/ec2/2016-11-15/paginators-1.json index 9789e391180..bec922cfe95 100755 --- a/models/apis/ec2/2016-11-15/paginators-1.json +++ b/models/apis/ec2/2016-11-15/paginators-1.json @@ -78,9 +78,21 @@ "output_token": "NextToken", "result_key": "EgressOnlyInternetGateways" }, + "DescribeExportImageTasks": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ExportImageTasks" + }, "DescribeExportTasks": { "result_key": "ExportTasks" }, + "DescribeFastSnapshotRestores": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "FastSnapshotRestores" + }, "DescribeFleets": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/models/apis/ec2/2016-11-15/waiters-2.json b/models/apis/ec2/2016-11-15/waiters-2.json index 33ea7b04716..31c1513e3c8 100755 --- a/models/apis/ec2/2016-11-15/waiters-2.json +++ b/models/apis/ec2/2016-11-15/waiters-2.json @@ -379,6 +379,24 @@ } ] }, + "SecurityGroupExists": { + "operation": "DescribeSecurityGroups", + "delay": 5, + "maxAttempts": 6, + "acceptors": [ + { + "expected": true, + "matcher": "path", + "state": "success", + "argument": "length(SecurityGroups[].GroupId) > `0`" + }, + { + "expected": "InvalidGroupNotFound", + "matcher": "error", + "state": "retry" + } + ] + }, "SpotInstanceRequestFulfilled": { "operation": "DescribeSpotInstanceRequests", "maxAttempts": 40, diff --git a/models/apis/ecs/2014-11-13/api-2.json b/models/apis/ecs/2014-11-13/api-2.json index 61451902f4d..6f8fcbdf504 100644 --- a/models/apis/ecs/2014-11-13/api-2.json +++ b/models/apis/ecs/2014-11-13/api-2.json @@ -1472,7 +1472,8 @@ "type":"structure", "members":{ "arn":{"shape":"String"}, - "reason":{"shape":"String"} + "reason":{"shape":"String"}, + "detail":{"shape":"String"} } }, "Failures":{ @@ -2144,19 +2145,20 @@ "required":["taskDefinition"], "members":{ "cluster":{"shape":"String"}, - "taskDefinition":{"shape":"String"}, - "overrides":{"shape":"TaskOverride"}, "count":{"shape":"BoxedInteger"}, - "startedBy":{"shape":"String"}, + "enableECSManagedTags":{"shape":"Boolean"}, "group":{"shape":"String"}, + "launchType":{"shape":"LaunchType"}, + "networkConfiguration":{"shape":"NetworkConfiguration"}, + "overrides":{"shape":"TaskOverride"}, "placementConstraints":{"shape":"PlacementConstraints"}, "placementStrategy":{"shape":"PlacementStrategies"}, - "launchType":{"shape":"LaunchType"}, "platformVersion":{"shape":"String"}, - "networkConfiguration":{"shape":"NetworkConfiguration"}, + "propagateTags":{"shape":"PropagateTags"}, + "referenceId":{"shape":"String"}, + "startedBy":{"shape":"String"}, "tags":{"shape":"Tags"}, - "enableECSManagedTags":{"shape":"Boolean"}, - "propagateTags":{"shape":"PropagateTags"} + "taskDefinition":{"shape":"String"} } }, "RunTaskResponse":{ @@ -2335,20 +2337,21 @@ "StartTaskRequest":{ "type":"structure", "required":[ - "taskDefinition", - "containerInstances" + "containerInstances", + "taskDefinition" ], "members":{ "cluster":{"shape":"String"}, - "taskDefinition":{"shape":"String"}, - "overrides":{"shape":"TaskOverride"}, "containerInstances":{"shape":"StringList"}, - "startedBy":{"shape":"String"}, + "enableECSManagedTags":{"shape":"Boolean"}, "group":{"shape":"String"}, "networkConfiguration":{"shape":"NetworkConfiguration"}, + "overrides":{"shape":"TaskOverride"}, + "propagateTags":{"shape":"PropagateTags"}, + "referenceId":{"shape":"String"}, + 
"startedBy":{"shape":"String"}, "tags":{"shape":"Tags"}, - "enableECSManagedTags":{"shape":"Boolean"}, - "propagateTags":{"shape":"PropagateTags"} + "taskDefinition":{"shape":"String"} } }, "StartTaskResponse":{ @@ -2509,36 +2512,38 @@ "Task":{ "type":"structure", "members":{ - "taskArn":{"shape":"String"}, + "attachments":{"shape":"Attachments"}, + "attributes":{"shape":"Attributes"}, + "availabilityZone":{"shape":"String"}, "clusterArn":{"shape":"String"}, - "taskDefinitionArn":{"shape":"String"}, + "connectivity":{"shape":"Connectivity"}, + "connectivityAt":{"shape":"Timestamp"}, "containerInstanceArn":{"shape":"String"}, - "overrides":{"shape":"TaskOverride"}, - "lastStatus":{"shape":"String"}, - "desiredStatus":{"shape":"String"}, + "containers":{"shape":"Containers"}, "cpu":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "desiredStatus":{"shape":"String"}, + "executionStoppedAt":{"shape":"Timestamp"}, + "group":{"shape":"String"}, + "healthStatus":{"shape":"HealthStatus"}, + "inferenceAccelerators":{"shape":"InferenceAccelerators"}, + "lastStatus":{"shape":"String"}, + "launchType":{"shape":"LaunchType"}, "memory":{"shape":"String"}, - "containers":{"shape":"Containers"}, - "startedBy":{"shape":"String"}, - "version":{"shape":"Long"}, - "stoppedReason":{"shape":"String"}, - "stopCode":{"shape":"TaskStopCode"}, - "connectivity":{"shape":"Connectivity"}, - "connectivityAt":{"shape":"Timestamp"}, + "overrides":{"shape":"TaskOverride"}, + "platformVersion":{"shape":"String"}, "pullStartedAt":{"shape":"Timestamp"}, "pullStoppedAt":{"shape":"Timestamp"}, - "executionStoppedAt":{"shape":"Timestamp"}, - "createdAt":{"shape":"Timestamp"}, "startedAt":{"shape":"Timestamp"}, - "stoppingAt":{"shape":"Timestamp"}, + "startedBy":{"shape":"String"}, + "stopCode":{"shape":"TaskStopCode"}, "stoppedAt":{"shape":"Timestamp"}, - "group":{"shape":"String"}, - "launchType":{"shape":"LaunchType"}, - "platformVersion":{"shape":"String"}, - "attachments":{"shape":"Attachments"}, - "healthStatus":{"shape":"HealthStatus"}, + "stoppedReason":{"shape":"String"}, + "stoppingAt":{"shape":"Timestamp"}, "tags":{"shape":"Tags"}, - "inferenceAccelerators":{"shape":"InferenceAccelerators"} + "taskArn":{"shape":"String"}, + "taskDefinitionArn":{"shape":"String"}, + "version":{"shape":"Long"} } }, "TaskDefinition":{ @@ -2559,6 +2564,7 @@ "requiresCompatibilities":{"shape":"CompatibilityList"}, "cpu":{"shape":"String"}, "memory":{"shape":"String"}, + "inferenceAccelerators":{"shape":"InferenceAccelerators"}, "pidMode":{"shape":"PidMode"}, "ipcMode":{"shape":"IpcMode"}, "proxyConfiguration":{"shape":"ProxyConfiguration"} @@ -2614,9 +2620,11 @@ "type":"structure", "members":{ "containerOverrides":{"shape":"ContainerOverrides"}, + "cpu":{"shape":"String"}, "inferenceAcceleratorOverrides":{"shape":"InferenceAcceleratorOverrides"}, - "taskRoleArn":{"shape":"String"}, - "executionRoleArn":{"shape":"String"} + "executionRoleArn":{"shape":"String"}, + "memory":{"shape":"String"}, + "taskRoleArn":{"shape":"String"} } }, "TaskSet":{ diff --git a/models/apis/ecs/2014-11-13/docs-2.json b/models/apis/ecs/2014-11-13/docs-2.json index 396ce01f4e8..317a6f4198c 100644 --- a/models/apis/ecs/2014-11-13/docs-2.json +++ b/models/apis/ecs/2014-11-13/docs-2.json @@ -119,7 +119,8 @@ "ListAttributesResponse$attributes": "

A list of attribute objects that meet the criteria of the request.

", "PutAttributesRequest$attributes": "

The attributes to apply to your resource. You can specify up to 10 custom attributes per resource. You can specify up to 10 attributes in a single call.

", "PutAttributesResponse$attributes": "

The attributes applied to your resource.

", - "RegisterContainerInstanceRequest$attributes": "

The container instance attributes that this container instance supports.

" + "RegisterContainerInstanceRequest$attributes": "

The container instance attributes that this container instance supports.

", + "Task$attributes": "

The attributes of the task.

" } }, "AwsVpcConfiguration": { @@ -169,8 +170,8 @@ "Container$exitCode": "

The exit code returned from the container.

", "ContainerDefinition$memory": "

The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task memory value, if one is specified. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If using the Fargate launch type, this parameter is optional.

If using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level memory and memoryReservation value, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", "ContainerDefinition$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the memory parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to MemoryReservation in the Create a container section of the Docker Remote API and the --memory-reservation option to docker run.

If a task-level memory value is not specified, you must specify a non-zero integer for one or both of memory or memoryReservation in a container definition. If you specify both, memory must be greater than memoryReservation. If you specify memoryReservation, then that value is subtracted from the available memory resources for the container instance on which the container is placed. Otherwise, the value of memory is used.

For example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a memoryReservation of 128 MiB, and a memory hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.

The Docker daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.

", - "ContainerDefinition$startTimeout": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time then containerA will give up and not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

", - "ContainerDefinition$stopTimeout": "

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own. For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes. This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

", + "ContainerDefinition$startTimeout": "

Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition, with containerA having a dependency on containerB reaching a COMPLETE, SUCCESS, or HEALTHY status. If a startTimeout value is specified for containerB and it does not reach the desired status within that time, then containerA gives up and does not start. This results in the task transitioning to a STOPPED state.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable a container start timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later.

", + "ContainerDefinition$stopTimeout": "

Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.

For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes and the task or service requires platform version 1.3.0 or later.

For tasks using the EC2 launch type, the stop timeout value for the container takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration parameter, if used. Container instances require at least version 1.26.0 of the container agent to enable a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
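Editor's note: a sketch, under the same assumptions as the previous one, of how dependsOn, startTimeout, and stopTimeout fit together in a single container definition. Container names and images are placeholders, and the ContainerConditionSuccess enum spelling follows the v2 preview's code generation, so check it against the generated ecs package.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.RegisterTaskDefinitionRequest(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("ordered-startup"), // hypothetical family name
		ContainerDefinitions: []ecs.ContainerDefinition{
			{
				// A short-lived setup container that the application waits for.
				Name:      aws.String("setup"),
				Image:     aws.String("amazonlinux:2"), // placeholder image
				Essential: aws.Bool(false),
				Memory:    aws.Int64(128),
			},
			{
				Name:      aws.String("app"),
				Image:     aws.String("amazonlinux:2"), // placeholder image
				Essential: aws.Bool(true),
				Memory:    aws.Int64(256),
				// Wait for "setup" to exit successfully before starting "app".
				DependsOn: []ecs.ContainerDependency{
					{ContainerName: aws.String("setup"), Condition: ecs.ContainerConditionSuccess},
				},
				StartTimeout: aws.Int64(120), // give up on the dependency after 120 seconds
				StopTimeout:  aws.Int64(60),  // force-kill 60 seconds after the stop signal
			},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}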

", "ContainerOverride$cpu": "

The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name.

", "ContainerOverride$memory": "

The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.

", "ContainerOverride$memoryReservation": "

The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name.

", @@ -327,7 +328,7 @@ "ContainerDependencies": { "base": null, "refs": { - "ContainerDefinition$dependsOn": "

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

This parameter is available for tasks using the Fargate launch type in the Ohio (us-east-2) region only and the task or service requires platform version 1.3.0 or later.

" + "ContainerDefinition$dependsOn": "

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

For tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to enable container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you are using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks using the Fargate launch type, the task or service requires platform version 1.3.0 or later.

" } }, "ContainerDependency": { @@ -756,7 +757,8 @@ "base": null, "refs": { "RegisterTaskDefinitionRequest$inferenceAccelerators": "

The Elastic Inference accelerators to use for the containers in the task.

", - "Task$inferenceAccelerators": "

The Elastic Inference accelerator associated with the task.

" + "Task$inferenceAccelerators": "

The Elastic Inference accelerator associated with the task.

", + "TaskDefinition$inferenceAccelerators": "

The Elastic Inference accelerator associated with the task.

" } }, "Integer": { @@ -953,7 +955,7 @@ "LogDriver": { "base": null, "refs": { - "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs, splunk, and awsfirelens.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

For more information about using the awsfirelens log driver, see Custom Log Routing in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

" + "LogConfiguration$logDriver": "

The log driver to use for the container. The valid values listed for this parameter are log drivers that the Amazon ECS container agent can communicate with by default.

For tasks using the Fargate launch type, the supported log drivers are awslogs and splunk.

For tasks using the EC2 launch type, the supported log drivers are awslogs, fluentd, gelf, json-file, journald, logentries, syslog, and splunk.

For more information about using the awslogs log driver, see Using the awslogs Log Driver in the Amazon Elastic Container Service Developer Guide.

If you have a custom driver that is not listed above that you would like to work with the Amazon ECS container agent, you can fork the Amazon ECS container agent project that is available on GitHub and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, Amazon Web Services does not currently support running modified copies of this software.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
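Editor's note: a minimal sketch of configuring the awslogs driver on a container definition with this SDK. The log group, Region, and image are placeholders (the log group is assumed to exist already), and the LogDriverAwslogs constant name follows the v2 preview's enum generation.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.RegisterTaskDefinitionRequest(&ecs.RegisterTaskDefinitionInput{
		Family: aws.String("awslogs-sample"), // hypothetical family name
		ContainerDefinitions: []ecs.ContainerDefinition{{
			Name:      aws.String("web"),
			Image:     aws.String("nginx:latest"), // placeholder image
			Essential: aws.Bool(true),
			Memory:    aws.Int64(256),
			LogConfiguration: &ecs.LogConfiguration{
				// awslogs is supported on both the EC2 and Fargate launch types.
				LogDriver: ecs.LogDriverAwslogs,
				Options: map[string]string{
					"awslogs-group":         "/ecs/awslogs-sample", // pre-created log group (assumption)
					"awslogs-region":        "us-west-2",           // placeholder Region
					"awslogs-stream-prefix": "web",
				},
			},
		}},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}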

" } }, "Long": { @@ -1518,6 +1520,7 @@ "DockerVolumeConfiguration$driver": "

The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use docker plugin ls to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see Docker plugin discovery. This parameter maps to Driver in the Create a volume section of the Docker Remote API and the --driver option to docker volume create.

", "Failure$arn": "

The Amazon Resource Name (ARN) of the failed resource.

", "Failure$reason": "

The reason for the failure.

", + "Failure$detail": "

The details of the failure.

", "FirelensConfigurationOptionsMap$key": null, "FirelensConfigurationOptionsMap$value": null, "GpuIds$member": null, @@ -1532,33 +1535,33 @@ "KeyValuePair$value": "

The value of the key-value pair. For environment variables, this is the value of the environment variable.

", "ListAccountSettingsRequest$value": "

The value of the account settings with which to filter results. You must also specify an account setting name to use this parameter.

", "ListAccountSettingsRequest$principalArn": "

The ARN of the principal, which can be an IAM user, IAM role, or the root user. If this field is omitted, the account settings are listed only for the authenticated user.

", - "ListAccountSettingsRequest$nextToken": "

The nextToken value returned from a previous paginated ListAccountSettings request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListAccountSettingsRequest$nextToken": "

The nextToken value returned from a ListAccountSettings request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListAccountSettingsResponse$nextToken": "

The nextToken value to include in a future ListAccountSettings request. When the results of a ListAccountSettings request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListAttributesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster to list attributes. If you do not specify a cluster, the default cluster is assumed.

", "ListAttributesRequest$attributeName": "

The name of the attribute with which to filter the results.

", "ListAttributesRequest$attributeValue": "

The value of the attribute with which to filter results. You must also specify an attribute name to use this parameter.

", - "ListAttributesRequest$nextToken": "

The nextToken value returned from a previous paginated ListAttributes request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListAttributesRequest$nextToken": "

The nextToken value returned from a ListAttributes request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListAttributesResponse$nextToken": "

The nextToken value to include in a future ListAttributes request. When the results of a ListAttributes request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", - "ListClustersRequest$nextToken": "

The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListClustersRequest$nextToken": "

The nextToken value returned from a ListClusters request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListClustersResponse$nextToken": "

The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListContainerInstancesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the container instances to list. If you do not specify a cluster, the default cluster is assumed.

", "ListContainerInstancesRequest$filter": "

You can filter the results of a ListContainerInstances operation with cluster query language statements. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", - "ListContainerInstancesRequest$nextToken": "

The nextToken value returned from a previous paginated ListContainerInstances request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListContainerInstancesRequest$nextToken": "

The nextToken value returned from a ListContainerInstances request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListContainerInstancesResponse$nextToken": "

The nextToken value to include in a future ListContainerInstances request. When the results of a ListContainerInstances request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListServicesRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the services to list. If you do not specify a cluster, the default cluster is assumed.

", - "ListServicesRequest$nextToken": "

The nextToken value returned from a previous paginated ListServices request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListServicesRequest$nextToken": "

The nextToken value returned from a ListServices request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListServicesResponse$nextToken": "

The nextToken value to include in a future ListServices request. When the results of a ListServices request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.

", "ListTaskDefinitionFamiliesRequest$familyPrefix": "

The familyPrefix is a string that is used to filter the results of ListTaskDefinitionFamilies. If you specify a familyPrefix, only task definition family names that begin with the familyPrefix string are returned.

", - "ListTaskDefinitionFamiliesRequest$nextToken": "

The nextToken value returned from a previous paginated ListTaskDefinitionFamilies request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTaskDefinitionFamiliesRequest$nextToken": "

The nextToken value returned from a ListTaskDefinitionFamilies request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListTaskDefinitionFamiliesResponse$nextToken": "

The nextToken value to include in a future ListTaskDefinitionFamilies request. When the results of a ListTaskDefinitionFamilies request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListTaskDefinitionsRequest$familyPrefix": "

The full family name with which to filter the ListTaskDefinitions results. Specifying a familyPrefix limits the listed task definitions to task definition revisions that belong to that family.

", - "ListTaskDefinitionsRequest$nextToken": "

The nextToken value returned from a previous paginated ListTaskDefinitions request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTaskDefinitionsRequest$nextToken": "

The nextToken value returned from a ListTaskDefinitions request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListTaskDefinitionsResponse$nextToken": "

The nextToken value to include in a future ListTaskDefinitions request. When the results of a ListTaskDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "ListTasksRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the tasks to list. If you do not specify a cluster, the default cluster is assumed.

", "ListTasksRequest$containerInstance": "

The container instance ID or full ARN of the container instance with which to filter the ListTasks results. Specifying a containerInstance limits the results to tasks that belong to that container instance.

", "ListTasksRequest$family": "

The name of the family with which to filter the ListTasks results. Specifying a family limits the results to tasks that belong to that family.

", - "ListTasksRequest$nextToken": "

The nextToken value returned from a previous paginated ListTasks request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListTasksRequest$nextToken": "

The nextToken value returned from a ListTasks request indicating that more results are available to fulfill the request and that further calls will be needed. If maxResults was provided, the number of results may be fewer than maxResults.

This token should be treated as an opaque identifier that is only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListTasksRequest$startedBy": "

The startedBy value with which to filter the task results. Specifying a startedBy value limits the results to tasks that were started with that value.

", "ListTasksRequest$serviceName": "

The name of the service with which to filter the ListTasks results. Specifying a serviceName limits the results to tasks that belong to that service.

", "ListTasksResponse$nextToken": "

The nextToken value to include in a future ListTasks request. When the results of a ListTasks request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.
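Editor's note: the nextToken/maxResults contract described in the List* entries above is the same for every paginated ECS operation, so a single hedged Go sketch using ListTasks should cover them all. The cluster name and page size are placeholders, and the request/Send pattern is assumed from the v2 preview SDK.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	var nextToken *string
	for {
		req := svc.ListTasksRequest(&ecs.ListTasksInput{
			Cluster:    aws.String("default"),
			MaxResults: aws.Int64(10),
			// Pass the token from the previous page back verbatim; treat it as opaque.
			NextToken: nextToken,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, arn := range resp.TaskArns {
			fmt.Println(arn)
		}
		if resp.NextToken == nil {
			break // a nil nextToken means there are no more results to return
		}
		nextToken = resp.NextToken
	}
}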

", @@ -1595,10 +1598,11 @@ "Resource$type": "

The type of the resource, such as INTEGER, DOUBLE, LONG, or STRINGSET.

", "ResourceRequirement$value": "

The value for the specified resource type.

If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent will reserve for the container. The number of GPUs reserved for all containers in a task should not exceed the number of available GPUs on the container instance the task is launched on.

If the InferenceAccelerator type is used, the value should match the deviceName for an InferenceAccelerator specified in a task definition.

", "RunTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to run your task. If you do not specify a cluster, the default cluster is assumed.

", - "RunTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run. If a revision is not specified, the latest ACTIVE revision is used.

", - "RunTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", "RunTaskRequest$group": "

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

", "RunTaskRequest$platformVersion": "

The platform version the task should run. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", + "RunTaskRequest$referenceId": "

The reference ID to use for the task.

", + "RunTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", + "RunTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to run. If a revision is not specified, the latest ACTIVE revision is used.
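Editor's note: a sketch of a RunTask call that ties together cluster, taskDefinition, group, startedBy, and the referenceId field added in this update. All names and identifiers are placeholders; the startedBy value is later usable as a ListTasks filter, as the entry above describes.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.RunTaskRequest(&ecs.RunTaskInput{
		Cluster:        aws.String("default"),
		TaskDefinition: aws.String("batch-job"), // family name; the latest ACTIVE revision is used
		Count:          aws.Int64(1),
		Group:          aws.String("family:batch-job"),
		StartedBy:      aws.String("nightly-export-2019-11-20"), // tag to filter ListTasks results later
		ReferenceId:    aws.String("job-0001"),                  // reference ID field added in this update
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range resp.Tasks {
		if t.TaskArn != nil {
			log.Println("started:", *t.TaskArn)
		}
	}
}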

", "Secret$name": "

The name of the secret.

", "Secret$valueFrom": "

The secret to expose to the container. The supported values are either the full ARN of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.

If the AWS Systems Manager Parameter Store parameter exists in the same Region as the task you are launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.
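Editor's note: a minimal sketch of exposing a Systems Manager parameter to a container via the secrets field. The execution role ARN, parameter name, and image are placeholders; a same-Region parameter is assumed so that the bare name is accepted, per the entry above.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.RegisterTaskDefinitionRequest(&ecs.RegisterTaskDefinitionInput{
		Family:           aws.String("secrets-sample"),                                 // hypothetical family name
		ExecutionRoleArn: aws.String("arn:aws:iam::123456789012:role/ecsTaskExecRole"), // placeholder role that can read the parameter
		ContainerDefinitions: []ecs.ContainerDefinition{{
			Name:      aws.String("app"),
			Image:     aws.String("amazonlinux:2"), // placeholder image
			Essential: aws.Bool(true),
			Memory:    aws.Int64(256),
			Secrets: []ecs.Secret{{
				// Injected into the container as the DB_PASSWORD environment variable.
				Name: aws.String("DB_PASSWORD"),
				// Same-Region SSM parameter, so the name alone is accepted; use the full ARN otherwise.
				ValueFrom: aws.String("/prod/db/password"), // placeholder parameter name
			}},
		}},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}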

", "ServerException$message": null, @@ -1617,9 +1621,10 @@ "Setting$value": "

Whether the account setting is enabled or disabled for the specified resource.

", "Setting$principalArn": "

The ARN of the principal, which can be an IAM user, IAM role, or the root user. If this field is omitted, the authenticated user is assumed.

", "StartTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster on which to start your task. If you do not specify a cluster, the default cluster is assumed.

", - "StartTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to start. If a revision is not specified, the latest ACTIVE revision is used.

", - "StartTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", "StartTaskRequest$group": "

The name of the task group to associate with the task. The default value is the family name of the task definition (for example, family:my-family-name).

", + "StartTaskRequest$referenceId": "

The reference ID to use for the task.

", + "StartTaskRequest$startedBy": "

An optional tag specified when a task is started. For example, if you automatically trigger a task to run a batch process job, you could apply a unique identifier for that job to your task with the startedBy parameter. You can then identify which tasks belong to that job by filtering the results of a ListTasks call with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

If a task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", + "StartTaskRequest$taskDefinition": "

The family and revision (family:revision) or full ARN of the task definition to start. If a revision is not specified, the latest ACTIVE revision is used.

", "StopTaskRequest$cluster": "

The short name or full Amazon Resource Name (ARN) of the cluster that hosts the task to stop. If you do not specify a cluster, the default cluster is assumed.

", "StopTaskRequest$task": "

The task ID or full Amazon Resource Name (ARN) of the task to stop.

", "StopTaskRequest$reason": "

An optional message specified when a task is stopped. For example, if you are using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.
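Editor's note: a small sketch of stopping a task with a reason message, which then surfaces in later DescribeTasks calls as described above. The cluster name and task ID are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.StopTaskRequest(&ecs.StopTaskInput{
		Cluster: aws.String("default"),
		Task:    aws.String("1dc5c17a-422b-4dc4-b493-371970c6c4d6"), // placeholder task ID
		// Shown in subsequent DescribeTasks responses; limited to 255 characters.
		Reason: aws.String("Scheduler detected a stale job"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}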

", @@ -1643,18 +1648,19 @@ "SystemControl$namespace": "

The namespaced kernel parameter for which to set a value.

", "SystemControl$value": "

The value for the namespaced kernel parameter specified in namespace.

", "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon ECS tasks, services, task definitions, clusters, and container instances.

", - "Task$taskArn": "

The Amazon Resource Name (ARN) of the task.

", + "Task$availabilityZone": "

The Availability Zone of the task.

", "Task$clusterArn": "

The ARN of the cluster that hosts the task.

", - "Task$taskDefinitionArn": "

The ARN of the task definition that creates the task.

", "Task$containerInstanceArn": "

The ARN of the container instances that host the task.

", - "Task$lastStatus": "

The last known status of the task. For more information, see Task Lifecycle.

", - "Task$desiredStatus": "

The desired status of the task. For more information, see Task Lifecycle.

", "Task$cpu": "

The number of CPU units used by the task as expressed in a task definition. It can be expressed as an integer using CPU units, for example 1024. It can also be expressed as a string using vCPUs, for example 1 vCPU or 1 vcpu. String values are converted to an integer indicating the CPU units when the task definition is registered.

If you are using the EC2 launch type, this field is optional. Supported values are between 128 CPU units (0.125 vCPUs) and 10240 CPU units (10 vCPUs).

If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the memory parameter:

", + "Task$desiredStatus": "

The desired status of the task. For more information, see Task Lifecycle.

", + "Task$group": "

The name of the task group associated with the task.

", + "Task$lastStatus": "

The last known status of the task. For more information, see Task Lifecycle.

", "Task$memory": "

The amount of memory (in MiB) used by the task as expressed in a task definition. It can be expressed as an integer using MiB, for example 1024. It can also be expressed as a string using GB, for example 1GB or 1 GB. String values are converted to an integer indicating the MiB when the task definition is registered.

If you are using the EC2 launch type, this field is optional.

If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of supported values for the cpu parameter:

", + "Task$platformVersion": "

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", "Task$startedBy": "

The tag specified when a task is started. If the task is started by an Amazon ECS service, then the startedBy parameter contains the deployment ID of the service that starts it.

", "Task$stoppedReason": "

The reason that the task was stopped.

", - "Task$group": "

The name of the task group associated with the task.

", - "Task$platformVersion": "

The platform version on which your task is running. A platform version is only specified for tasks using the Fargate launch type. If one is not specified, the LATEST platform version is used by default. For more information, see AWS Fargate Platform Versions in the Amazon Elastic Container Service Developer Guide.

", + "Task$taskArn": "

The Amazon Resource Name (ARN) of the task.

", + "Task$taskDefinitionArn": "

The ARN of the task definition that creates the task.

", "TaskDefinition$taskDefinitionArn": "

The full Amazon Resource Name (ARN) of the task definition.

", "TaskDefinition$family": "

The name of a family that this task definition is registered to. A family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.

", "TaskDefinition$taskRoleArn": "

The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see Amazon ECS Task Role in the Amazon Elastic Container Service Developer Guide.

IAM roles for tasks on Windows require that the -EnableTaskIAMRole option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code in order to take advantage of the feature. For more information, see Windows IAM Roles for Tasks in the Amazon Elastic Container Service Developer Guide.

", @@ -1662,8 +1668,10 @@ "TaskDefinition$cpu": "

The number of cpu units used by the task. If you are using the EC2 launch type, this field is optional and any value can be used. If you are using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the memory parameter:

", "TaskDefinition$memory": "

The amount (in MiB) of memory used by the task.

If using the EC2 launch type, this field is optional and any value can be used. If a task-level memory value is specified then the container-level memory value is optional.

If using the Fargate launch type, this field is required and you must use one of the following values, which determines your range of valid values for the cpu parameter:

", "TaskDefinitionPlacementConstraint$expression": "

A cluster query language expression to apply to the constraint. For more information, see Cluster Query Language in the Amazon Elastic Container Service Developer Guide.

", - "TaskOverride$taskRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.

", + "TaskOverride$cpu": "

The cpu override for the task.

", "TaskOverride$executionRoleArn": "

The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.

", + "TaskOverride$memory": "

The memory override for the task.

", + "TaskOverride$taskRoleArn": "

The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.
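Editor's note: the task-level cpu and memory overrides added in this update sit alongside the existing per-container overrides, so a short sketch of a RunTask call with a TaskOverride may help. Cluster, family, container name, and command are placeholders; the new fields are string-typed, like their task definition counterparts.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ecs.New(cfg)

	req := svc.RunTaskRequest(&ecs.RunTaskInput{
		Cluster:        aws.String("default"),
		TaskDefinition: aws.String("batch-job"), // hypothetical family name
		Overrides: &ecs.TaskOverride{
			// Task-level cpu and memory overrides, new in this update.
			Cpu:    aws.String("1024"),
			Memory: aws.String("2048"),
			ContainerOverrides: []ecs.ContainerOverride{{
				Name:    aws.String("worker"),
				Command: []string{"process", "--input", "s3://example-bucket/batch.csv"},
			}},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}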

", "TaskSet$id": "

The ID of the task set.

", "TaskSet$taskSetArn": "

The Amazon Resource Name (ARN) of the task set.

", "TaskSet$serviceArn": "

The Amazon Resource Name (ARN) of the service the task set exists in.

", @@ -1970,13 +1978,13 @@ "SubmitTaskStateChangeRequest$pullStoppedAt": "

The Unix timestamp for when the container image pull completed.

", "SubmitTaskStateChangeRequest$executionStoppedAt": "

The Unix timestamp for when the task execution stopped.

", "Task$connectivityAt": "

The Unix timestamp for when the task last went into CONNECTED status.

", + "Task$createdAt": "

The Unix timestamp for when the task was created (the task entered the PENDING state).

", + "Task$executionStoppedAt": "

The Unix timestamp for when the task execution stopped.

", "Task$pullStartedAt": "

The Unix timestamp for when the container image pull began.

", "Task$pullStoppedAt": "

The Unix timestamp for when the container image pull completed.

", - "Task$executionStoppedAt": "

The Unix timestamp for when the task execution stopped.

", - "Task$createdAt": "

The Unix timestamp for when the task was created (the task entered the PENDING state).

", "Task$startedAt": "

The Unix timestamp for when the task started (the task transitioned from the PENDING state to the RUNNING state).

", - "Task$stoppingAt": "

The Unix timestamp for when the task stops (transitions from the RUNNING state to STOPPED).

", "Task$stoppedAt": "

The Unix timestamp for when the task was stopped (the task transitioned from the RUNNING state to the STOPPED state).

", + "Task$stoppingAt": "

The Unix timestamp for when the task stops (transitions from the RUNNING state to STOPPED).

", "TaskSet$createdAt": "

The Unix timestamp for when the task set was created.

", "TaskSet$updatedAt": "

The Unix timestamp for when the task set was last updated.

", "TaskSet$stabilityStatusAt": "

The Unix timestamp for when the task set stability status was retrieved.

" diff --git a/models/apis/eks/2017-11-01/api-2.json b/models/apis/eks/2017-11-01/api-2.json index 9b4beae09e7..2b60a3dafdc 100644 --- a/models/apis/eks/2017-11-01/api-2.json +++ b/models/apis/eks/2017-11-01/api-2.json @@ -31,6 +31,24 @@ {"shape":"UnsupportedAvailabilityZoneException"} ] }, + "CreateNodegroup":{ + "name":"CreateNodegroup", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups" + }, + "input":{"shape":"CreateNodegroupRequest"}, + "output":{"shape":"CreateNodegroupResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DeleteCluster":{ "name":"DeleteCluster", "http":{ @@ -47,6 +65,23 @@ {"shape":"ServiceUnavailableException"} ] }, + "DeleteNodegroup":{ + "name":"DeleteNodegroup", + "http":{ + "method":"DELETE", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}" + }, + "input":{"shape":"DeleteNodegroupRequest"}, + "output":{"shape":"DeleteNodegroupResponse"}, + "errors":[ + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DescribeCluster":{ "name":"DescribeCluster", "http":{ @@ -62,6 +97,22 @@ {"shape":"ServiceUnavailableException"} ] }, + "DescribeNodegroup":{ + "name":"DescribeNodegroup", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}" + }, + "input":{"shape":"DescribeNodegroupRequest"}, + "output":{"shape":"DescribeNodegroupResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"} + ] + }, "DescribeUpdate":{ "name":"DescribeUpdate", "http":{ @@ -92,6 +143,22 @@ {"shape":"ServiceUnavailableException"} ] }, + "ListNodegroups":{ + "name":"ListNodegroups", + "http":{ + "method":"GET", + "requestUri":"/clusters/{name}/node-groups" + }, + "input":{"shape":"ListNodegroupsRequest"}, + "output":{"shape":"ListNodegroupsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -179,9 +246,60 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestException"} ] + }, + "UpdateNodegroupConfig":{ + "name":"UpdateNodegroupConfig", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}/update-config" + }, + "input":{"shape":"UpdateNodegroupConfigRequest"}, + "output":{"shape":"UpdateNodegroupConfigResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ] + }, + "UpdateNodegroupVersion":{ + "name":"UpdateNodegroupVersion", + "http":{ + "method":"POST", + "requestUri":"/clusters/{name}/node-groups/{nodegroupName}/update-version" + }, + "input":{"shape":"UpdateNodegroupVersionRequest"}, + "output":{"shape":"UpdateNodegroupVersionResponse"}, + "errors":[ + 
{"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServerException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"} + ] } }, "shapes":{ + "AMITypes":{ + "type":"string", + "enum":[ + "AL2_x86_64", + "AL2_x86_64_GPU" + ] + }, + "AutoScalingGroup":{ + "type":"structure", + "members":{ + "name":{"shape":"String"} + } + }, + "AutoScalingGroupList":{ + "type":"list", + "member":{"shape":"AutoScalingGroup"} + }, "BadRequestException":{ "type":"structure", "members":{ @@ -195,6 +313,15 @@ "type":"boolean", "box":true }, + "BoxedInteger":{ + "type":"integer", + "box":true + }, + "Capacity":{ + "type":"integer", + "box":true, + "min":1 + }, "Certificate":{ "type":"structure", "members":{ @@ -205,6 +332,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -241,7 +369,8 @@ "CREATING", "ACTIVE", "DELETING", - "FAILED" + "FAILED", + "UPDATING" ] }, "CreateClusterRequest":{ @@ -270,6 +399,44 @@ "cluster":{"shape":"Cluster"} } }, + "CreateNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName", + "subnets", + "nodeRole" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{"shape":"String"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "diskSize":{"shape":"BoxedInteger"}, + "subnets":{"shape":"StringList"}, + "instanceTypes":{"shape":"StringList"}, + "amiType":{"shape":"AMITypes"}, + "remoteAccess":{"shape":"RemoteAccessConfig"}, + "nodeRole":{"shape":"String"}, + "labels":{"shape":"labelsMap"}, + "tags":{"shape":"TagMap"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + }, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"} + } + }, + "CreateNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DeleteClusterRequest":{ "type":"structure", "required":["name"], @@ -287,6 +454,31 @@ "cluster":{"shape":"Cluster"} } }, + "DeleteNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + } + } + }, + "DeleteNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DescribeClusterRequest":{ "type":"structure", "required":["name"], @@ -304,6 +496,31 @@ "cluster":{"shape":"Cluster"} } }, + "DescribeNodegroupRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + } + } + }, + "DescribeNodegroupResponse":{ + "type":"structure", + "members":{ + "nodegroup":{"shape":"Nodegroup"} + } + }, "DescribeUpdateRequest":{ "type":"structure", "required":[ @@ -320,6 +537,11 @@ "shape":"String", "location":"uri", "locationName":"updateId" + }, + "nodegroupName":{ + "shape":"String", + "location":"querystring", + "locationName":"nodegroupName" } } }, @@ -339,7 +561,10 @@ "AccessDenied", "OperationNotPermitted", "VpcIdNotFound", - "Unknown" + "Unknown", + "NodeCreationFailure", + "PodEvictionFailure", + 
"InsufficientFreeAddresses" ] }, "ErrorDetail":{ @@ -364,6 +589,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -373,11 +599,24 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, "exception":true }, + "Issue":{ + "type":"structure", + "members":{ + "code":{"shape":"NodegroupIssueCode"}, + "message":{"shape":"String"}, + "resourceIds":{"shape":"StringList"} + } + }, + "IssueList":{ + "type":"list", + "member":{"shape":"Issue"} + }, "ListClustersRequest":{ "type":"structure", "members":{ @@ -406,6 +645,40 @@ "nextToken":{"shape":"String"} } }, + "ListNodegroupsRequest":{ + "type":"structure", + "required":["clusterName"], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "maxResults":{ + "shape":"ListNodegroupsRequestMaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNodegroupsRequestMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListNodegroupsResponse":{ + "type":"structure", + "members":{ + "nodegroups":{"shape":"StringList"}, + "nextToken":{"shape":"String"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -432,6 +705,11 @@ "location":"uri", "locationName":"name" }, + "nodegroupName":{ + "shape":"String", + "location":"querystring", + "locationName":"nodegroupName" + }, "nextToken":{ "shape":"String", "location":"querystring", @@ -488,6 +766,80 @@ "clusterLogging":{"shape":"LogSetups"} } }, + "Nodegroup":{ + "type":"structure", + "members":{ + "nodegroupName":{"shape":"String"}, + "nodegroupArn":{"shape":"String"}, + "clusterName":{"shape":"String"}, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"}, + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"}, + "status":{"shape":"NodegroupStatus"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "instanceTypes":{"shape":"StringList"}, + "subnets":{"shape":"StringList"}, + "remoteAccess":{"shape":"RemoteAccessConfig"}, + "amiType":{"shape":"AMITypes"}, + "nodeRole":{"shape":"String"}, + "labels":{"shape":"labelsMap"}, + "resources":{"shape":"NodegroupResources"}, + "diskSize":{"shape":"BoxedInteger"}, + "health":{"shape":"NodegroupHealth"}, + "tags":{"shape":"TagMap"} + } + }, + "NodegroupHealth":{ + "type":"structure", + "members":{ + "issues":{"shape":"IssueList"} + } + }, + "NodegroupIssueCode":{ + "type":"string", + "enum":[ + "AutoScalingGroupNotFound", + "Ec2SecurityGroupNotFound", + "Ec2SecurityGroupDeletionFailure", + "Ec2LaunchTemplateNotFound", + "Ec2LaunchTemplateVersionMismatch", + "IamInstanceProfileNotFound", + "IamNodeRoleNotFound", + "AsgInstanceLaunchFailures", + "InstanceLimitExceeded", + "InsufficientFreeAddresses", + "AccessDenied", + "InternalFailure" + ] + }, + "NodegroupResources":{ + "type":"structure", + "members":{ + "autoScalingGroups":{"shape":"AutoScalingGroupList"}, + "remoteAccessSecurityGroup":{"shape":"String"} + } + }, + "NodegroupScalingConfig":{ + "type":"structure", + "members":{ + "minSize":{"shape":"Capacity"}, + "maxSize":{"shape":"Capacity"}, + "desiredSize":{"shape":"Capacity"} + } + }, + "NodegroupStatus":{ + "type":"string", + "enum":[ + "CREATING", + 
"ACTIVE", + "UPDATING", + "DELETING", + "CREATE_FAILED", + "DELETE_FAILED", + "DEGRADED" + ] + }, "NotFoundException":{ "type":"structure", "members":{ @@ -502,10 +854,18 @@ "issuer":{"shape":"String"} } }, + "RemoteAccessConfig":{ + "type":"structure", + "members":{ + "ec2SshKey":{"shape":"String"}, + "sourceSecurityGroups":{"shape":"StringList"} + } + }, "ResourceInUseException":{ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":409}, @@ -515,6 +875,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":400}, @@ -524,6 +885,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":404}, @@ -533,6 +895,7 @@ "type":"structure", "members":{ "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "message":{"shape":"String"} }, "error":{"httpStatusCode":500}, @@ -601,6 +964,7 @@ "members":{ "message":{"shape":"String"}, "clusterName":{"shape":"String"}, + "nodegroupName":{"shape":"String"}, "validZones":{"shape":"StringList"} }, "error":{"httpStatusCode":400}, @@ -689,6 +1053,76 @@ "update":{"shape":"Update"} } }, + "UpdateLabelsPayload":{ + "type":"structure", + "members":{ + "addOrUpdateLabels":{"shape":"labelsMap"}, + "removeLabels":{"shape":"labelsKeyList"} + } + }, + "UpdateNodegroupConfigRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + }, + "labels":{"shape":"UpdateLabelsPayload"}, + "scalingConfig":{"shape":"NodegroupScalingConfig"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + } + } + }, + "UpdateNodegroupConfigResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, + "UpdateNodegroupVersionRequest":{ + "type":"structure", + "required":[ + "clusterName", + "nodegroupName" + ], + "members":{ + "clusterName":{ + "shape":"String", + "location":"uri", + "locationName":"name" + }, + "nodegroupName":{ + "shape":"String", + "location":"uri", + "locationName":"nodegroupName" + }, + "version":{"shape":"String"}, + "releaseVersion":{"shape":"String"}, + "force":{"shape":"Boolean"}, + "clientRequestToken":{ + "shape":"String", + "idempotencyToken":true + } + } + }, + "UpdateNodegroupVersionResponse":{ + "type":"structure", + "members":{ + "update":{"shape":"Update"} + } + }, "UpdateParam":{ "type":"structure", "members":{ @@ -703,7 +1137,13 @@ "PlatformVersion", "EndpointPrivateAccess", "EndpointPublicAccess", - "ClusterLogging" + "ClusterLogging", + "DesiredSize", + "LabelsToAdd", + "LabelsToRemove", + "MaxSize", + "MinSize", + "ReleaseVersion" ] }, "UpdateParams":{ @@ -724,7 +1164,8 @@ "enum":[ "VersionUpdate", "EndpointAccessUpdate", - "LoggingUpdate" + "LoggingUpdate", + "ConfigUpdate" ] }, "VpcConfigRequest":{ @@ -741,10 +1182,30 @@ "members":{ "subnetIds":{"shape":"StringList"}, "securityGroupIds":{"shape":"StringList"}, + "clusterSecurityGroupId":{"shape":"String"}, "vpcId":{"shape":"String"}, "endpointPublicAccess":{"shape":"Boolean"}, "endpointPrivateAccess":{"shape":"Boolean"} } + }, + "labelKey":{ + "type":"string", + "max":63, + "min":1 + }, + 
"labelValue":{ + "type":"string", + "max":253, + "min":1 + }, + "labelsKeyList":{ + "type":"list", + "member":{"shape":"String"} + }, + "labelsMap":{ + "type":"map", + "key":{"shape":"labelKey"}, + "value":{"shape":"labelValue"} } } } diff --git a/models/apis/eks/2017-11-01/docs-2.json b/models/apis/eks/2017-11-01/docs-2.json index 307719237fc..960da577fa5 100644 --- a/models/apis/eks/2017-11-01/docs-2.json +++ b/models/apis/eks/2017-11-01/docs-2.json @@ -3,18 +3,43 @@ "service": "

Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on AWS without needing to stand up or maintain your own Kubernetes control plane. Kubernetes is an open-source system for automating the deployment, scaling, and management of containerized applications.

Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use all the existing plugins and tooling from the Kubernetes community. Applications running on Amazon EKS are fully compatible with applications running on any standard Kubernetes environment, whether running in on-premises data centers or public clouds. This means that you can easily migrate any standard Kubernetes application to Amazon EKS without any code modification required.

", "operations": { "CreateCluster": "

Creates an Amazon EKS control plane.

The Amazon EKS control plane consists of control plane instances that run the Kubernetes software, such as etcd and the API server. The control plane runs in an account managed by AWS, and the Kubernetes API is exposed via the Amazon EKS API server endpoint. Each Amazon EKS cluster control plane is single-tenant and unique and runs on its own set of Amazon EC2 instances.

The cluster control plane is provisioned across multiple Availability Zones and fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC subnets to provide connectivity from the control plane instances to the worker nodes (for example, to support kubectl exec, logs, and proxy data flows).

Amazon EKS worker nodes run in your AWS account and connect to your cluster's control plane via the Kubernetes API server endpoint and a certificate file that is created for your cluster.

You can use the endpointPublicAccess and endpointPrivateAccess parameters to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

You can use the logging parameter to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.

Cluster creation typically takes between 10 and 15 minutes. After you create an Amazon EKS cluster, you must configure your Kubernetes tooling to communicate with the API server and launch worker nodes into your cluster. For more information, see Managing Cluster Authentication and Launching Amazon EKS Worker Nodes in the Amazon EKS User Guide.

", - "DeleteCluster": "

Deletes the Amazon EKS cluster control plane.

If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see Deleting a Cluster in the Amazon EKS User Guide.

", + "CreateNodegroup": "

Creates a managed worker node group for an Amazon EKS cluster. You can only create a node group whose Kubernetes version is equal to the cluster's current Kubernetes version. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster.

An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS-optimized Amazon Linux 2 AMI. For more information, see Managed Node Groups in the Amazon EKS User Guide.
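A minimal Go sketch of calling the new CreateNodegroup operation with this SDK follows. It assumes the v2 preview's request/send pattern and that the generated type and field names mirror the CreateNodegroupRequest shape in this model; the cluster name, node role ARN, and subnet IDs are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

func main() {
	// Load credentials and region from the default sources (env, shared config).
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("unable to load SDK config: %v", err)
	}
	svc := eks.New(cfg)

	// Create a managed node group for an existing cluster. Type and field
	// names are assumed from the CreateNodegroupRequest shape in this model;
	// the cluster name, role ARN, and subnet IDs are placeholders.
	req := svc.CreateNodegroupRequest(&eks.CreateNodegroupInput{
		ClusterName:   aws.String("prod"),
		NodegroupName: aws.String("standard-workers"),
		NodeRole:      aws.String("arn:aws:iam::111122223333:role/eks-node-role"),
		Subnets:       []string{"subnet-0a1b2c3d", "subnet-4e5f6a7b"},
		ScalingConfig: &eks.NodegroupScalingConfig{
			MinSize:     aws.Int64(1),
			MaxSize:     aws.Int64(4),
			DesiredSize: aws.Int64(2),
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatalf("CreateNodegroup failed: %v", err)
	}

	// The node group starts in CREATING; poll DescribeNodegroup (or use the
	// NodegroupActive waiter added in waiters-2.json) until it is ACTIVE.
	fmt.Println("node group status:", resp.Nodegroup.Status)
}
```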

", + "DeleteCluster": "

Deletes the Amazon EKS cluster control plane.

If you have active services in your cluster that are associated with a load balancer, you must delete those services before deleting the cluster so that the load balancers are deleted properly. Otherwise, you can have orphaned resources in your VPC that prevent you from being able to delete the VPC. For more information, see Deleting a Cluster in the Amazon EKS User Guide.

If you have managed node groups attached to the cluster, you must delete them first. For more information, see DeleteNodegroup.

", + "DeleteNodegroup": "

Deletes an Amazon EKS node group for a cluster.

", "DescribeCluster": "

Returns descriptive information about an Amazon EKS cluster.

The API server endpoint and certificate authority data returned by this operation are required for kubelet and kubectl to communicate with your Kubernetes API server. For more information, see Create a kubeconfig for Amazon EKS.

The API server endpoint and certificate authority data aren't available until the cluster reaches the ACTIVE state.

", - "DescribeUpdate": "

Returns descriptive information about an update against your Amazon EKS cluster.

When the status of the update is Succeeded, the update is complete. If an update fails, the status is Failed, and an error detail explains the reason for the failure.

", + "DescribeNodegroup": "

Returns descriptive information about an Amazon EKS node group.

", + "DescribeUpdate": "

Returns descriptive information about an update against your Amazon EKS cluster or associated managed node group.

When the status of the update is Succeeded, the update is complete. If an update fails, the status is Failed, and an error detail explains the reason for the failure.

", "ListClusters": "

Lists the Amazon EKS clusters in your AWS account in the specified Region.

", + "ListNodegroups": "

Lists the Amazon EKS node groups associated with the specified cluster in your AWS account in the specified Region.

", "ListTagsForResource": "

Lists the tags for an Amazon EKS resource.

", - "ListUpdates": "

Lists the updates associated with an Amazon EKS cluster in your AWS account, in the specified Region.

", - "TagResource": "

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.

", + "ListUpdates": "

Lists the updates associated with an Amazon EKS cluster or managed node group in your AWS account, in the specified Region.

", + "TagResource": "

Associates the specified tags with the resource identified by the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the cluster. For example, if you tag a cluster with this operation, that tag does not automatically propagate to the subnets and worker nodes associated with the cluster.

", "UntagResource": "

Deletes specified tags from a resource.

", "UpdateClusterConfig": "

Updates an Amazon EKS cluster configuration. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

You can use this API operation to enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.

You can also use this API operation to enable or disable public and private access to your cluster's Kubernetes API server endpoint. By default, public access is enabled, and private access is disabled. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

At this time, you cannot update the subnets or security group IDs for an existing cluster.

Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

", - "UpdateClusterVersion": "

Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

" + "UpdateClusterVersion": "

Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues to function during the update. The response output includes an update ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

Cluster updates are asynchronous, and they should finish within a few minutes. During an update, the cluster status moves to UPDATING (this status transition is eventually consistent). When the update is complete (either Failed or Successful), the cluster status moves to Active.

If your cluster has managed node groups attached to it, all of your node groups’ Kubernetes versions must match the cluster’s Kubernetes version in order to update the cluster to a new Kubernetes version.

", + "UpdateNodegroupConfig": "

Updates an Amazon EKS managed node group configuration. Your node group continues to function during the update. The response output includes an update ID that you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes labels for a node group or the scaling configuration.
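A rough sketch of updating a node group's Kubernetes labels and scaling configuration, reusing the eks client (svc) from the CreateNodegroup sketch above; the label keys and sizes are placeholders, and the generated names are assumed to follow the UpdateNodegroupConfigRequest and UpdateLabelsPayload shapes.

```go
// Update labels and scaling for an existing managed node group, reusing the
// eks client (svc) from the CreateNodegroup sketch above. Field names are
// assumed from the UpdateNodegroupConfigRequest and UpdateLabelsPayload shapes.
req := svc.UpdateNodegroupConfigRequest(&eks.UpdateNodegroupConfigInput{
	ClusterName:   aws.String("prod"),
	NodegroupName: aws.String("standard-workers"),
	Labels: &eks.UpdateLabelsPayload{
		AddOrUpdateLabels: map[string]string{"workload": "batch"},
		RemoveLabels:      []string{"temporary"},
	},
	ScalingConfig: &eks.NodegroupScalingConfig{
		MinSize:     aws.Int64(2),
		MaxSize:     aws.Int64(6),
		DesiredSize: aws.Int64(3),
	},
})
resp, err := req.Send(context.Background())
if err != nil {
	log.Fatalf("UpdateNodegroupConfig failed: %v", err)
}
// Track the asynchronous change with DescribeUpdate using the returned ID.
if resp.Update != nil && resp.Update.Id != nil {
	fmt.Println("update id:", *resp.Update.Id)
}
```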

", + "UpdateNodegroupVersion": "

Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

You can update to the latest available AMI version of a node group's current Kubernetes version by not specifying a Kubernetes version in the request. You can update to the latest AMI version of your cluster's current Kubernetes version by specifying your cluster's Kubernetes version in the request. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.

You cannot roll back a node group to an earlier Kubernetes version or AMI version.

When a node in a managed node group is terminated due to a scaling action or update, the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully and will fail if it is unable to do so. You can force the update if Amazon EKS is unable to drain the nodes as a result of a pod disruption budget issue.

" }, "shapes": { + "AMITypes": { + "base": null, + "refs": { + "CreateNodegroupRequest$amiType": "

The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support; non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon EKS-optimized Linux AMI.

", + "Nodegroup$amiType": "

The AMI type associated with your node group. GPU instance types should use the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support; non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon EKS-optimized Linux AMI.

" + } + }, + "AutoScalingGroup": { + "base": "

An AutoScaling group that is associated with an Amazon EKS managed node group.

", + "refs": { + "AutoScalingGroupList$member": null + } + }, + "AutoScalingGroupList": { + "base": null, + "refs": { + "NodegroupResources$autoScalingGroups": "

The Auto Scaling groups associated with the node group.

" + } + }, "BadRequestException": { "base": "

This exception is thrown if the request contains a semantic error. The precise meaning will depend on the API, and will be documented in the error message.

", "refs": { @@ -23,6 +48,7 @@ "Boolean": { "base": null, "refs": { + "UpdateNodegroupVersionRequest$force": "

Force the update if the existing node group's pods cannot be drained due to a pod disruption budget issue. If an update fails because pods could not be drained, you can force the update; the old node is then terminated regardless of whether any pods are running on it.

", "VpcConfigResponse$endpointPublicAccess": "

This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server can receive only requests that originate from within the cluster VPC.

", "VpcConfigResponse$endpointPrivateAccess": "

This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate from within your cluster's VPC use the private VPC endpoint instead of traversing the internet.

" } @@ -35,6 +61,21 @@ "VpcConfigRequest$endpointPrivateAccess": "

Set this value to true to enable private access for your cluster's Kubernetes API server endpoint. If you enable private access, Kubernetes API requests from within your cluster's VPC use the private VPC endpoint. The default value for this parameter is false, which disables private access for your Kubernetes API server. For more information, see Amazon EKS Cluster Endpoint Access Control in the Amazon EKS User Guide .

" } }, + "BoxedInteger": { + "base": null, + "refs": { + "CreateNodegroupRequest$diskSize": "

The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB.

", + "Nodegroup$diskSize": "

The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB.

" + } + }, + "Capacity": { + "base": null, + "refs": { + "NodegroupScalingConfig$minSize": "

The minimum number of worker nodes that the managed node group can scale in to. This number must be greater than zero.

", + "NodegroupScalingConfig$maxSize": "

The maximum number of worker nodes that the managed node group can scale out to. Managed node groups can support up to 100 nodes by default.

", + "NodegroupScalingConfig$desiredSize": "

The current number of worker nodes that the managed node group should maintain.

" + } + }, "Certificate": { "base": "

An object representing the certificate-authority-data for your cluster.

", "refs": { @@ -76,6 +117,16 @@ "refs": { } }, + "CreateNodegroupRequest": { + "base": null, + "refs": { + } + }, + "CreateNodegroupResponse": { + "base": null, + "refs": { + } + }, "DeleteClusterRequest": { "base": null, "refs": { @@ -86,6 +137,16 @@ "refs": { } }, + "DeleteNodegroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteNodegroupResponse": { + "base": null, + "refs": { + } + }, "DescribeClusterRequest": { "base": null, "refs": { @@ -96,6 +157,16 @@ "refs": { } }, + "DescribeNodegroupRequest": { + "base": null, + "refs": { + } + }, + "DescribeNodegroupResponse": { + "base": null, + "refs": { + } + }, "DescribeUpdateRequest": { "base": null, "refs": { @@ -140,6 +211,18 @@ "refs": { } }, + "Issue": { + "base": "

An object representing an issue with an Amazon EKS resource.

", + "refs": { + "IssueList$member": null + } + }, + "IssueList": { + "base": null, + "refs": { + "NodegroupHealth$issues": "

Any issues that are associated with the node group.

" + } + }, "ListClustersRequest": { "base": null, "refs": { @@ -156,6 +239,22 @@ "refs": { } }, + "ListNodegroupsRequest": { + "base": null, + "refs": { + } + }, + "ListNodegroupsRequestMaxResults": { + "base": null, + "refs": { + "ListNodegroupsRequest$maxResults": "

The maximum number of node group results returned by ListNodegroups in paginated output. When you use this parameter, ListNodegroups returns only maxResults results in a single page along with a nextToken response element. You can see the remaining results of the initial request by sending another ListNodegroups request with the returned nextToken value. This value can be between 1 and 100. If you don't use this parameter, ListNodegroups returns up to 100 results and a nextToken value if applicable.
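A short sketch of paging through ListNodegroups by following nextToken, again reusing the eks client (svc) from the earlier sketch; the cluster name and page size are placeholders.

```go
// Page through all node groups for a cluster by following nextToken, reusing
// the eks client (svc) from the earlier sketch. Page size and cluster name
// are placeholders.
var nodegroups []string
var nextToken *string
for {
	req := svc.ListNodegroupsRequest(&eks.ListNodegroupsInput{
		ClusterName: aws.String("prod"),
		MaxResults:  aws.Int64(50),
		NextToken:   nextToken,
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatalf("ListNodegroups failed: %v", err)
	}
	nodegroups = append(nodegroups, resp.Nodegroups...)
	if resp.NextToken == nil {
		break // no more pages
	}
	nextToken = resp.NextToken
}
fmt.Println("node groups:", nodegroups)
```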

" + } + }, + "ListNodegroupsResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -214,6 +313,46 @@ "UpdateClusterConfigRequest$logging": "

Enable or disable exporting the Kubernetes control plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the Amazon EKS User Guide .

CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported control plane logs. For more information, see Amazon CloudWatch Pricing.

" } }, + "Nodegroup": { + "base": "

An object representing an Amazon EKS managed node group.

", + "refs": { + "CreateNodegroupResponse$nodegroup": "

The full description of your new node group.

", + "DeleteNodegroupResponse$nodegroup": "

The full description of your deleted node group.

", + "DescribeNodegroupResponse$nodegroup": "

The full description of your node group.

" + } + }, + "NodegroupHealth": { + "base": "

An object representing the health status of the node group.

", + "refs": { + "Nodegroup$health": "

The health status of the node group. If there are issues with your node group's health, they are listed here.

" + } + }, + "NodegroupIssueCode": { + "base": null, + "refs": { + "Issue$code": "

A brief description of the error.

" + } + }, + "NodegroupResources": { + "base": "

An object representing the resources associated with the node group, such as Auto Scaling groups and security groups for remote access.

", + "refs": { + "Nodegroup$resources": "

The resources associated with the node group, such as Auto Scaling groups and security groups for remote access.

" + } + }, + "NodegroupScalingConfig": { + "base": "

An object representing the scaling configuration details for the AutoScaling group that is associated with your node group.

", + "refs": { + "CreateNodegroupRequest$scalingConfig": "

The scaling configuration details for the AutoScaling group that is created for your node group.

", + "Nodegroup$scalingConfig": "

The scaling configuration details for the AutoScaling group that is associated with your node group.

", + "UpdateNodegroupConfigRequest$scalingConfig": "

The scaling configuration details for the AutoScaling group after the update.

" + } + }, + "NodegroupStatus": { + "base": null, + "refs": { + "Nodegroup$status": "

The current status of the managed node group.

" + } + }, "NotFoundException": { "base": "

A service resource associated with the request could not be found. Clients should not retry such requests.

", "refs": { @@ -225,6 +364,13 @@ "Identity$oidc": "

The OpenID Connect identity provider information for the cluster.

" } }, + "RemoteAccessConfig": { + "base": "

An object representing the remote access configuration for the managed node group.

", + "refs": { + "CreateNodegroupRequest$remoteAccess": "

The remote access (SSH) configuration to use with your node group.

", + "Nodegroup$remoteAccess": "

The remote access (SSH) configuration that is associated with the node group.

" + } + }, "ResourceInUseException": { "base": "

The specified resource is in use.

", "refs": { @@ -236,7 +382,7 @@ } }, "ResourceNotFoundException": { - "base": "

The specified resource could not be found. You can view your available clusters with ListClusters. Amazon EKS clusters are Region-specific.

", + "base": "

The specified resource could not be found. You can view your available clusters with ListClusters. You can view your available managed node groups with ListNodegroups. Amazon EKS clusters and node groups are Region-specific.

", "refs": { } }, @@ -253,9 +399,11 @@ "String": { "base": null, "refs": { + "AutoScalingGroup$name": "

The name of the AutoScaling group associated with an Amazon EKS managed node group.

", "BadRequestException$message": null, "Certificate$data": "

The Base64-encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster.

", "ClientException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "ClientException$nodegroupName": null, "ClientException$message": null, "Cluster$name": "

The name of the cluster.

", "Cluster$arn": "

The Amazon Resource Name (ARN) of the cluster.

", @@ -267,53 +415,101 @@ "CreateClusterRequest$version": "

The desired Kubernetes version for your cluster. If you don't specify a value here, the latest version available in Amazon EKS is used.

", "CreateClusterRequest$roleArn": "

The Amazon Resource Name (ARN) of the IAM role that provides permissions for Amazon EKS to make calls to other AWS API operations on your behalf. For more information, see Amazon EKS Service IAM Role in the Amazon EKS User Guide .

", "CreateClusterRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "CreateNodegroupRequest$clusterName": "

The name of the cluster to create the node group in.

", + "CreateNodegroupRequest$nodegroupName": "

The unique name to give your node group.

", + "CreateNodegroupRequest$nodeRole": "

The IAM role associated with your node group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch worker nodes and register them into a cluster, you must create an IAM role for those worker nodes to use when they are launched. For more information, see Amazon EKS Worker Node IAM Role in the Amazon EKS User Guide .

", + "CreateNodegroupRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "CreateNodegroupRequest$version": "

The Kubernetes version to use for your managed nodes. By default, the Kubernetes version of the cluster is used, and this is the only accepted value.

", + "CreateNodegroupRequest$releaseVersion": "

The AMI version of the Amazon EKS-optimized AMI to use with your node group. By default, the latest available AMI version for the node group's current Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.

", "DeleteClusterRequest$name": "

The name of the cluster to delete.

", + "DeleteNodegroupRequest$clusterName": "

The name of the Amazon EKS cluster that is associated with your node group.

", + "DeleteNodegroupRequest$nodegroupName": "

The name of the node group to delete.

", "DescribeClusterRequest$name": "

The name of the cluster to describe.

", - "DescribeUpdateRequest$name": "

The name of the Amazon EKS cluster to update.

", + "DescribeNodegroupRequest$clusterName": "

The name of the Amazon EKS cluster associated with the node group.

", + "DescribeNodegroupRequest$nodegroupName": "

The name of the node group to describe.

", + "DescribeUpdateRequest$name": "

The name of the Amazon EKS cluster associated with the update.

", "DescribeUpdateRequest$updateId": "

The ID of the update to describe.

", + "DescribeUpdateRequest$nodegroupName": "

The name of the Amazon EKS node group associated with the update.

", "ErrorDetail$errorMessage": "

A more complete description of the error.

", "InvalidParameterException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "InvalidParameterException$nodegroupName": null, "InvalidParameterException$message": null, "InvalidRequestException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "InvalidRequestException$nodegroupName": null, "InvalidRequestException$message": null, + "Issue$message": "

The error message associated with the issue.

", "ListClustersRequest$nextToken": "

The nextToken value returned from a previous paginated ListClusters request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

This token should be treated as an opaque identifier that is used only to retrieve the next items in a list and not for other programmatic purposes.

", "ListClustersResponse$nextToken": "

The nextToken value to include in a future ListClusters request. When the results of a ListClusters request exceed maxResults, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

", - "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters.

", + "ListNodegroupsRequest$clusterName": "

The name of the Amazon EKS cluster that you would like to list node groups in.

", + "ListNodegroupsRequest$nextToken": "

The nextToken value returned from a previous paginated ListNodegroups request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", + "ListNodegroupsResponse$nextToken": "

The nextToken value to include in a future ListNodegroups request. When the results of a ListNodegroups request exceed maxResults, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.

", "ListUpdatesRequest$name": "

The name of the Amazon EKS cluster to list updates for.

", + "ListUpdatesRequest$nodegroupName": "

The name of the Amazon EKS managed node group to list updates for.

", "ListUpdatesRequest$nextToken": "

The nextToken value returned from a previous paginated ListUpdates request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value.

", "ListUpdatesResponse$nextToken": "

The nextToken value to include in a future ListUpdates request. When the results of a ListUpdates request exceed maxResults, you can use this value to retrieve the next page of results. This value is null when there are no more results to return.

", + "Nodegroup$nodegroupName": "

The name associated with an Amazon EKS managed node group.

", + "Nodegroup$nodegroupArn": "

The Amazon Resource Name (ARN) associated with the managed node group.

", + "Nodegroup$clusterName": "

The name of the cluster that the managed node group resides in.

", + "Nodegroup$version": "

The Kubernetes version of the managed node group.

", + "Nodegroup$releaseVersion": "

The AMI version of the managed node group. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.

", + "Nodegroup$nodeRole": "

The IAM role associated with your node group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive permissions for these API calls through an IAM instance profile and associated policies. Before you can launch worker nodes and register them into a cluster, you must create an IAM role for those worker nodes to use when they are launched. For more information, see Amazon EKS Worker Node IAM Role in the Amazon EKS User Guide .

", + "NodegroupResources$remoteAccessSecurityGroup": "

The remote access security group associated with the node group. This security group controls SSH access to the worker nodes.

", "NotFoundException$message": null, "OIDC$issuer": "

The issuer URL for the OpenID Connect identity provider.

", + "RemoteAccessConfig$ec2SshKey": "

The Amazon EC2 SSH key that provides access for SSH communication with the worker nodes in the managed node group. For more information, see Amazon EC2 Key Pairs in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

", "ResourceInUseException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "ResourceInUseException$nodegroupName": null, "ResourceInUseException$message": null, "ResourceLimitExceededException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "ResourceLimitExceededException$nodegroupName": null, "ResourceLimitExceededException$message": null, "ResourceNotFoundException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "ResourceNotFoundException$nodegroupName": null, "ResourceNotFoundException$message": null, "ServerException$clusterName": "

The Amazon EKS cluster associated with the exception.

", + "ServerException$nodegroupName": null, "ServerException$message": null, "ServiceUnavailableException$message": null, "StringList$member": null, - "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters.

", + "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.

", "UnsupportedAvailabilityZoneException$message": null, "UnsupportedAvailabilityZoneException$clusterName": "

The Amazon EKS cluster associated with the exception.

", - "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters.

", + "UnsupportedAvailabilityZoneException$nodegroupName": null, + "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which to delete tags. Currently, the supported resources are Amazon EKS clusters and managed node groups.

", "Update$id": "

A UUID that is used to track the update.

", "UpdateClusterConfigRequest$name": "

The name of the Amazon EKS cluster to update.

", "UpdateClusterConfigRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "UpdateClusterVersionRequest$name": "

The name of the Amazon EKS cluster to update.

", "UpdateClusterVersionRequest$version": "

The desired Kubernetes version following a successful update.

", "UpdateClusterVersionRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "UpdateNodegroupConfigRequest$clusterName": "

The name of the Amazon EKS cluster that the managed node group resides in.

", + "UpdateNodegroupConfigRequest$nodegroupName": "

The name of the managed node group to update.

", + "UpdateNodegroupConfigRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "UpdateNodegroupVersionRequest$clusterName": "

The name of the Amazon EKS cluster that is associated with the managed node group to update.

", + "UpdateNodegroupVersionRequest$nodegroupName": "

The name of the managed node group to update.

", + "UpdateNodegroupVersionRequest$version": "

The Kubernetes version to update to. If no version is specified, then the Kubernetes version of the node group does not change. You can specify the Kubernetes version of the cluster to update the node group to the latest AMI version of the cluster's Kubernetes version.

", + "UpdateNodegroupVersionRequest$releaseVersion": "

The AMI version of the Amazon EKS-optimized AMI to use for the update. By default, the latest available AMI version for the node group's Kubernetes version is used. For more information, see Amazon EKS-Optimized Linux AMI Versions in the Amazon EKS User Guide.

", + "UpdateNodegroupVersionRequest$clientRequestToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", "UpdateParam$value": "

The value of the keys submitted as part of an update request.

", - "VpcConfigResponse$vpcId": "

The VPC associated with your cluster.

" + "VpcConfigResponse$clusterSecurityGroupId": "

The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control plane to data plane communication.

", + "VpcConfigResponse$vpcId": "

The VPC associated with your cluster.

", + "labelsKeyList$member": null } }, "StringList": { "base": null, "refs": { + "CreateNodegroupRequest$subnets": "

The subnets to use for the AutoScaling group that is created for your node group. These subnets must have the tag key kubernetes.io/cluster/CLUSTER_NAME with a value of shared, where CLUSTER_NAME is replaced with the name of your cluster.

", + "CreateNodegroupRequest$instanceTypes": "

The instance type to use for your node group. Currently, you can specify a single instance type for a node group. The default value for this parameter is t3.medium. If you choose a GPU instance type, be sure to specify AL2_x86_64_GPU for the amiType parameter.

", "ErrorDetail$resourceIds": "

An optional field that contains the resource IDs associated with the error.

", + "Issue$resourceIds": "

The AWS resources that are affected by this issue.

", "ListClustersResponse$clusters": "

A list of all of the clusters for your account in the specified Region.

", + "ListNodegroupsResponse$nodegroups": "

A list of all of the node groups associated with the specified cluster.

", "ListUpdatesResponse$updateIds": "

A list of all the updates for the specified cluster and Region.

", + "Nodegroup$instanceTypes": "

The instance types associated with your node group.

", + "Nodegroup$subnets": "

The subnets allowed for the AutoScaling group that is associated with your node group. These subnets must have the following tag: kubernetes.io/cluster/CLUSTER_NAME, where CLUSTER_NAME is replaced with the name of your cluster.

", + "RemoteAccessConfig$sourceSecurityGroups": "

The security groups that are allowed SSH access (port 22) to the worker nodes. If you specify an Amazon EC2 SSH key but do not specify a source security group when you create a managed node group, port 22 on the worker nodes is opened to the internet (0.0.0.0/0). For more information, see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.

", "UnsupportedAvailabilityZoneException$validZones": "

The supported Availability Zones for your account. Choose subnets in these Availability Zones for your cluster.

", "VpcConfigRequest$subnetIds": "

Specify subnets for your Amazon EKS worker nodes. Amazon EKS creates cross-account elastic network interfaces in these subnets to allow communication between your worker nodes and the Kubernetes control plane.

", "VpcConfigRequest$securityGroupIds": "

Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use to allow communication between your worker nodes and the Kubernetes control plane. If you don't specify a security group, the default security group for your VPC is used.

", @@ -337,9 +533,11 @@ "TagMap": { "base": null, "refs": { - "Cluster$tags": "

The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

", + "Cluster$tags": "

The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Cluster tags do not propagate to any other resources associated with the cluster.

", "CreateClusterRequest$tags": "

The metadata to apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define.

", + "CreateNodegroupRequest$tags": "

The metadata to apply to the node group to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Node group tags do not propagate to any other resources associated with the node group, such as the Amazon EC2 instances or subnets.

", "ListTagsForResourceResponse$tags": "

The tags for the resource.

", + "Nodegroup$tags": "

The metadata applied to the node group to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Node group tags do not propagate to any other resources associated with the node group, such as the Amazon EC2 instances or subnets.

", "TagResourceRequest$tags": "

The tags to add to the resource. A tag is an array of key-value pairs.

" } }, @@ -363,6 +561,8 @@ "base": null, "refs": { "Cluster$createdAt": "

The Unix epoch timestamp in seconds for when the cluster was created.

", + "Nodegroup$createdAt": "

The Unix epoch timestamp in seconds for when the managed node group was created.

", + "Nodegroup$modifiedAt": "

The Unix epoch timestamp in seconds for when the managed node group was last modified.

", "Update$createdAt": "

The Unix epoch timestamp in seconds for when the update was created.

" } }, @@ -386,7 +586,9 @@ "refs": { "DescribeUpdateResponse$update": "

The full description of the specified update.

", "UpdateClusterConfigResponse$update": null, - "UpdateClusterVersionResponse$update": "

The full description of the specified update

" + "UpdateClusterVersionResponse$update": "

The full description of the specified update

", + "UpdateNodegroupConfigResponse$update": null, + "UpdateNodegroupVersionResponse$update": null } }, "UpdateClusterConfigRequest": { @@ -409,6 +611,32 @@ "refs": { } }, + "UpdateLabelsPayload": { + "base": "

An object representing a Kubernetes label change for a managed node group.

", + "refs": { + "UpdateNodegroupConfigRequest$labels": "

The Kubernetes labels to be applied to the nodes in the node group after the update.

" + } + }, + "UpdateNodegroupConfigRequest": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupConfigResponse": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupVersionRequest": { + "base": null, + "refs": { + } + }, + "UpdateNodegroupVersionResponse": { + "base": null, + "refs": { + } + }, "UpdateParam": { "base": "

An object representing the details of an update request.

", "refs": { @@ -451,6 +679,32 @@ "refs": { "Cluster$resourcesVpcConfig": "

The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see Cluster VPC Considerations and Cluster Security Group Considerations in the Amazon EKS User Guide.

" } + }, + "labelKey": { + "base": null, + "refs": { + "labelsMap$key": null + } + }, + "labelValue": { + "base": null, + "refs": { + "labelsMap$value": null + } + }, + "labelsKeyList": { + "base": null, + "refs": { + "UpdateLabelsPayload$removeLabels": "

Kubernetes labels to be removed.

" + } + }, + "labelsMap": { + "base": null, + "refs": { + "CreateNodegroupRequest$labels": "

The Kubernetes labels to be applied to the nodes in the node group when they are created.

", + "Nodegroup$labels": "

The Kubernetes labels applied to the nodes in the node group.

Only labels that are applied with the Amazon EKS API are shown here. There may be other Kubernetes labels applied to the nodes in this group.

", + "UpdateLabelsPayload$addOrUpdateLabels": "

Kubernetes labels to be added or updated.

" + } } } } diff --git a/models/apis/eks/2017-11-01/examples-1.json b/models/apis/eks/2017-11-01/examples-1.json index 6a83da723a4..8ea2517578d 100644 --- a/models/apis/eks/2017-11-01/examples-1.json +++ b/models/apis/eks/2017-11-01/examples-1.json @@ -109,6 +109,27 @@ "id": "to-list-your-available-clusters-1527868801040", "title": "To list your available clusters" } + ], + "ListTagsForResource": [ + { + "input": { + "resourceArn": "arn:aws:eks:us-west-2:012345678910:cluster/beta" + }, + "output": { + "tags": { + "aws:tag:domain": "beta" + } + }, + "comments": { + "input": { + }, + "output": { + } + }, + "description": "This example lists all of the tags for the `beta` cluster.", + "id": "to-list-tags-for-a-cluster-1568666903378", + "title": "To list tags for a cluster" + } ] } } diff --git a/models/apis/eks/2017-11-01/paginators-1.json b/models/apis/eks/2017-11-01/paginators-1.json index abd2c6bdec9..662c51e44bb 100644 --- a/models/apis/eks/2017-11-01/paginators-1.json +++ b/models/apis/eks/2017-11-01/paginators-1.json @@ -6,6 +6,12 @@ "output_token": "nextToken", "result_key": "clusters" }, + "ListNodegroups": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "nodegroups" + }, "ListUpdates": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/models/apis/eks/2017-11-01/waiters-2.json b/models/apis/eks/2017-11-01/waiters-2.json index c325e521f59..449d2296c39 100644 --- a/models/apis/eks/2017-11-01/waiters-2.json +++ b/models/apis/eks/2017-11-01/waiters-2.json @@ -49,6 +49,43 @@ "state": "success" } ] + }, + "NodegroupActive": { + "delay": 30, + "operation": "DescribeNodegroup", + "maxAttempts": 80, + "acceptors": [ + { + "expected": "CREATE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "nodegroup.status" + }, + { + "expected": "ACTIVE", + "matcher": "path", + "state": "success", + "argument": "nodegroup.status" + } + ] + }, + "NodegroupDeleted": { + "delay": 30, + "operation": "DescribeNodegroup", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "DELETE_FAILED", + "matcher": "path", + "state": "failure", + "argument": "nodegroup.status" + }, + { + "expected": "ResourceNotFoundException", + "matcher": "error", + "state": "success" + } + ] } } } diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json index c05766c4e4e..0254075cbe3 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json @@ -73,7 +73,8 @@ {"shape":"TooManyRegistrationsForTargetIdException"}, {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "CreateLoadBalancer":{ @@ -127,7 +128,8 @@ {"shape":"TooManyTargetsException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "CreateTargetGroup":{ @@ -424,7 +426,8 @@ {"shape":"TooManyRegistrationsForTargetIdException"}, {"shape":"TooManyTargetsException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] 
}, "ModifyLoadBalancerAttributes":{ @@ -464,7 +467,8 @@ {"shape":"TargetGroupNotFoundException"}, {"shape":"UnsupportedProtocolException"}, {"shape":"TooManyActionsException"}, - {"shape":"InvalidLoadBalancerActionException"} + {"shape":"InvalidLoadBalancerActionException"}, + {"shape":"TooManyUniqueTargetGroupsPerLoadBalancerException"} ] }, "ModifyTargetGroup":{ @@ -635,7 +639,8 @@ "AuthenticateCognitoConfig":{"shape":"AuthenticateCognitoActionConfig"}, "Order":{"shape":"ActionOrder"}, "RedirectConfig":{"shape":"RedirectActionConfig"}, - "FixedResponseConfig":{"shape":"FixedResponseActionConfig"} + "FixedResponseConfig":{"shape":"FixedResponseActionConfig"}, + "ForwardConfig":{"shape":"ForwardActionConfig"} } }, "ActionOrder":{ @@ -1242,6 +1247,13 @@ "type":"string", "pattern":"^(2|4|5)\\d\\d$" }, + "ForwardActionConfig":{ + "type":"structure", + "members":{ + "TargetGroups":{"shape":"TargetGroupList"}, + "TargetGroupStickinessConfig":{"shape":"TargetGroupStickinessConfig"} + } + }, "HealthCheckEnabled":{"type":"boolean"}, "HealthCheckIntervalSeconds":{ "type":"integer", @@ -2106,6 +2118,10 @@ "type":"list", "member":{"shape":"TargetGroupAttribute"} }, + "TargetGroupList":{ + "type":"list", + "member":{"shape":"TargetGroupTuple"} + }, "TargetGroupName":{"type":"string"}, "TargetGroupNames":{ "type":"list", @@ -2122,6 +2138,23 @@ }, "exception":true }, + "TargetGroupStickinessConfig":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"TargetGroupStickinessEnabled"}, + "DurationSeconds":{"shape":"TargetGroupStickinessDurationSeconds"} + } + }, + "TargetGroupStickinessDurationSeconds":{"type":"integer"}, + "TargetGroupStickinessEnabled":{"type":"boolean"}, + "TargetGroupTuple":{ + "type":"structure", + "members":{ + "TargetGroupArn":{"shape":"TargetGroupArn"}, + "Weight":{"shape":"TargetGroupWeight"} + } + }, + "TargetGroupWeight":{"type":"integer"}, "TargetGroups":{ "type":"list", "member":{"shape":"TargetGroup"} @@ -2282,6 +2315,17 @@ }, "exception":true }, + "TooManyUniqueTargetGroupsPerLoadBalancerException":{ + "type":"structure", + "members":{ + }, + "error":{ + "code":"TooManyUniqueTargetGroupsPerLoadBalancer", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "UnsupportedProtocolException":{ "type":"structure", "members":{ diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 3c7ac399ac2..8186e523977 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -24,9 +24,9 @@ "DescribeTargetGroupAttributes": "

Describes the attributes for the specified target group.

For more information, see Target Group Attributes in the Application Load Balancers Guide or Target Group Attributes in the Network Load Balancers Guide.

", "DescribeTargetGroups": "

Describes the specified target groups or all of your target groups. By default, all target groups are described. Alternatively, you can specify one of the following to filter the results: the ARN of the load balancer, the names of one or more target groups, or the ARNs of one or more target groups.

To describe the targets for a target group, use DescribeTargetHealth. To describe the attributes of a target group, use DescribeTargetGroupAttributes.

", "DescribeTargetHealth": "

Describes the health of the specified targets or all of your targets.

", - "ModifyListener": "

Modifies the specified properties of the specified listener.

Any properties that you do not specify retain their current values. However, changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.

", + "ModifyListener": "

Replaces the specified properties of the specified listener. Any properties that you do not specify remain unchanged.

Changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the security policy and default certificate properties. If you change the protocol from HTTP to HTTPS, or from TCP to TLS, you must add the security policy and default certificate properties.

To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.

", "ModifyLoadBalancerAttributes": "

Modifies the specified attributes of the specified Application Load Balancer or Network Load Balancer.

If any of the specified attributes can't be modified as requested, the call fails. Any existing attributes that you do not modify retain their current values.

", - "ModifyRule": "

Modifies the specified rule.

Any existing properties that you do not modify retain their current values.

To modify the actions for the default rule, use ModifyListener.

", + "ModifyRule": "

Replaces the specified properties of the specified rule. Any properties that you do not specify are unchanged.

To add an item to a list, remove an item from a list, or update an item in a list, you must provide the entire list. For example, to add an action, specify a list with the current actions plus the new action.

To modify the actions for the default rule, use ModifyListener.

", "ModifyTargetGroup": "

Modifies the health checks used when evaluating the health state of the targets in the specified target group.

To monitor the health of the targets, use DescribeTargetHealth.

", "ModifyTargetGroupAttributes": "

Modifies the specified attributes of the specified target group.

", "RegisterTargets": "

Registers the specified targets with the specified target group.

If the target is an EC2 instance, it must be in the running state when you register it.

By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

To remove a target from a target group, use DeregisterTargets.

", @@ -59,11 +59,11 @@ "Actions": { "base": null, "refs": { - "CreateListenerInput$DefaultActions": "

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", - "CreateRuleInput$Actions": "

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", + "CreateListenerInput$DefaultActions": "

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", + "CreateRuleInput$Actions": "

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", "Listener$DefaultActions": "

The default actions for the listener.

", - "ModifyListenerInput$DefaultActions": "

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", - "ModifyRuleInput$Actions": "

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify a target group. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", + "ModifyListenerInput$DefaultActions": "

The actions for the default rule. The rule must include one forward action or one or more fixed-response actions.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", + "ModifyRuleInput$Actions": "

The actions. Each rule must include exactly one of the following types of actions: forward, fixed-response, or redirect, and it must be the last action to be performed.

If the action type is forward, you specify one or more target groups. The protocol of the target group must be HTTP or HTTPS for an Application Load Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a Network Load Balancer.

[HTTPS listeners] If the action type is authenticate-oidc, you authenticate users through an identity provider that is OpenID Connect (OIDC) compliant.

[HTTPS listeners] If the action type is authenticate-cognito, you authenticate users through the user pools supported by Amazon Cognito.

[Application Load Balancer] If the action type is redirect, you redirect specified client requests from one URL to another.

[Application Load Balancer] If the action type is fixed-response, you drop specified client requests and return a custom HTTP response.

", "Rule$Actions": "

The actions. Each rule must include exactly one of the following types of actions: forward, redirect, or fixed-response, and it must be the last action to be performed.

" } }, @@ -606,6 +606,12 @@ "FixedResponseActionConfig$StatusCode": "

The HTTP response code (2XX, 4XX, or 5XX).

" } }, + "ForwardActionConfig": { + "base": "

Information about a forward action.

", + "refs": { + "Action$ForwardConfig": "

Information for creating an action that distributes requests among one or more target groups. For Network Load Balancers, you can specify a single target group. Specify only when Type is forward. If you specify both ForwardConfig and TargetGroupArn, you can specify only one target group using ForwardConfig and it must be the same target group specified in TargetGroupArn.
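A hedged Go sketch of using the new forward action configuration to split listener traffic across two weighted target groups with stickiness enabled; it assumes the generated Go names mirror the ForwardActionConfig, TargetGroupTuple, and TargetGroupStickinessConfig shapes, and all ARNs are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	elbv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("unable to load SDK config: %v", err)
	}
	svc := elbv2.New(cfg)

	// Replace the listener's default action with a weighted forward action
	// that splits traffic 80/20 between two target groups. Shape and field
	// names are assumed from the ForwardActionConfig, TargetGroupTuple, and
	// TargetGroupStickinessConfig definitions; the ARNs are placeholders.
	req := svc.ModifyListenerRequest(&elbv2.ModifyListenerInput{
		ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:111122223333:listener/app/my-alb/1234567890abcdef/abcdef1234567890"),
		DefaultActions: []elbv2.Action{{
			Type: elbv2.ActionTypeEnumForward,
			ForwardConfig: &elbv2.ForwardActionConfig{
				TargetGroups: []elbv2.TargetGroupTuple{
					{TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:111122223333:targetgroup/blue/0123456789abcdef"), Weight: aws.Int64(80)},
					{TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:111122223333:targetgroup/green/fedcba9876543210"), Weight: aws.Int64(20)},
				},
				// Pin a client to the target group it was first routed to for one hour.
				TargetGroupStickinessConfig: &elbv2.TargetGroupStickinessConfig{
					Enabled:         aws.Bool(true),
					DurationSeconds: aws.Int64(3600),
				},
			},
		}},
	})
	if _, err := req.Send(context.Background()); err != nil {
		log.Fatalf("ModifyListener failed: %v", err)
	}
}
```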

" + } + }, "HealthCheckEnabled": { "base": null, "refs": { @@ -854,7 +860,7 @@ "LoadBalancerAttributeKey": { "base": null, "refs": { - "LoadBalancerAttribute$Key": "

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" + "LoadBalancerAttribute$Key": "

The name of the attribute.

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" } }, "LoadBalancerAttributeValue": { @@ -1009,7 +1015,7 @@ "Name": { "base": null, "refs": { - "Limit$Name": "

The name of the limit. The possible values are:

" + "Limit$Name": "

The name of the limit. The possible values are:

" } }, "OperationNotPermittedException": { @@ -1479,7 +1485,7 @@ "TargetGroupArn": { "base": null, "refs": { - "Action$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward.

", + "Action$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group. Specify only when Type is forward and you want to route to a single target group. To route to one or more target groups, use ForwardConfig instead.

", "DeleteTargetGroupInput$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", "DeregisterTargetsInput$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", "DescribeTargetGroupAttributesInput$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", @@ -1488,7 +1494,8 @@ "ModifyTargetGroupInput$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", "RegisterTargetsInput$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", "TargetGroup$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

", - "TargetGroupArns$member": null + "TargetGroupArns$member": null, + "TargetGroupTuple$TargetGroupArn": "

The Amazon Resource Name (ARN) of the target group.

" } }, "TargetGroupArns": { @@ -1528,6 +1535,12 @@ "ModifyTargetGroupAttributesOutput$Attributes": "

Information about the attributes.

" } }, + "TargetGroupList": { + "base": null, + "refs": { + "ForwardActionConfig$TargetGroups": "

One or more target groups. For Network Load Balancers, you can specify a single target group.

" + } + }, "TargetGroupName": { "base": null, "refs": { @@ -1547,6 +1560,36 @@ "refs": { } }, + "TargetGroupStickinessConfig": { + "base": "

Information about the target group stickiness for a rule.

", + "refs": { + "ForwardActionConfig$TargetGroupStickinessConfig": "

The target group stickiness for the rule.

" + } + }, + "TargetGroupStickinessDurationSeconds": { + "base": null, + "refs": { + "TargetGroupStickinessConfig$DurationSeconds": "

The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).

" + } + }, + "TargetGroupStickinessEnabled": { + "base": null, + "refs": { + "TargetGroupStickinessConfig$Enabled": "

Indicates whether target group stickiness is enabled.

" + } + }, + "TargetGroupTuple": { + "base": "

Information about how traffic will be distributed between multiple target groups in a forward rule.

", + "refs": { + "TargetGroupList$member": null + } + }, + "TargetGroupWeight": { + "base": null, + "refs": { + "TargetGroupTuple$Weight": "

The weight. The range is 0 to 999.

" + } + }, "TargetGroups": { "base": null, "refs": { @@ -1643,6 +1686,11 @@ "refs": { } }, + "TooManyUniqueTargetGroupsPerLoadBalancerException": { + "base": "

You've reached the limit on the number of unique target groups per load balancer across all listeners. If a target group is used by multiple actions for a load balancer, it is counted as only one use.

", + "refs": { + } + }, "UnsupportedProtocolException": { "base": "

The specified protocol is not supported.

", "refs": { diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index 0c689077f09..f3c4c4afdee 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -396,7 +396,8 @@ "type":"structure", "members":{ "ClusterId":{"shape":"XmlStringMaxLen256"}, - "InstanceFleetId":{"shape":"InstanceFleetId"} + "InstanceFleetId":{"shape":"InstanceFleetId"}, + "ClusterArn":{"shape":"ArnType"} } }, "AddInstanceGroupsInput":{ @@ -414,7 +415,8 @@ "type":"structure", "members":{ "JobFlowId":{"shape":"XmlStringMaxLen256"}, - "InstanceGroupIds":{"shape":"InstanceGroupIdsList"} + "InstanceGroupIds":{"shape":"InstanceGroupIdsList"}, + "ClusterArn":{"shape":"ArnType"} } }, "AddJobFlowStepsInput":{ @@ -653,7 +655,8 @@ "CustomAmiId":{"shape":"XmlStringMaxLen256"}, "EbsRootVolumeSize":{"shape":"Integer"}, "RepoUpgradeOnBoot":{"shape":"RepoUpgradeOnBoot"}, - "KerberosAttributes":{"shape":"KerberosAttributes"} + "KerberosAttributes":{"shape":"KerberosAttributes"}, + "ClusterArn":{"shape":"ArnType"} } }, "ClusterId":{"type":"string"}, @@ -707,7 +710,8 @@ "Id":{"shape":"ClusterId"}, "Name":{"shape":"String"}, "Status":{"shape":"ClusterStatus"}, - "NormalizedInstanceHours":{"shape":"Integer"} + "NormalizedInstanceHours":{"shape":"Integer"}, + "ClusterArn":{"shape":"ArnType"} } }, "ClusterSummaryList":{ @@ -1712,7 +1716,8 @@ "members":{ "ClusterId":{"shape":"ClusterId"}, "InstanceGroupId":{"shape":"InstanceGroupId"}, - "AutoScalingPolicy":{"shape":"AutoScalingPolicyDescription"} + "AutoScalingPolicy":{"shape":"AutoScalingPolicyDescription"}, + "ClusterArn":{"shape":"ArnType"} } }, "PutBlockPublicAccessConfigurationInput":{ @@ -1802,7 +1807,8 @@ "RunJobFlowOutput":{ "type":"structure", "members":{ - "JobFlowId":{"shape":"XmlStringMaxLen256"} + "JobFlowId":{"shape":"XmlStringMaxLen256"}, + "ClusterArn":{"shape":"ArnType"} } }, "ScaleDownBehavior":{ diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index 1cbb3ee50de..6d5aecc63b8 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -20,7 +20,7 @@ "ListInstanceGroups": "

Provides all available details about the instance groups in a cluster.

", "ListInstances": "

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

", "ListSecurityConfigurations": "

Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.

", - "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request.

", + "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates. You can specify a maximum of ten stepIDs.

", "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "ModifyInstanceGroups": "

ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.

", "PutAutoScalingPolicy": "

Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.

", @@ -29,7 +29,7 @@ "RemoveTags": "

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

The following example removes the stack tag with value Prod from a cluster:

", "RunJobFlow": "

RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.

For long running clusters, we recommend that you periodically store your results.

The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.

", "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", - "SetVisibleToAllUsers": "

This member will be deprecated.

Sets whether all AWS Identity and Access Management (IAM) users under your account can access the specified clusters (job flows). This action works on running clusters. You can also set the visibility of a cluster when you launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers action can be called only by an IAM user who created the cluster or the AWS account that owns the cluster.

", + "SetVisibleToAllUsers": "

Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true setting when you create a cluster by using the VisibleToAllUsers parameter with RunJobFlow.

", "TerminateJobFlows": "

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

" }, "shapes": { @@ -103,7 +103,13 @@ "ArnType": { "base": null, "refs": { - "BlockPublicAccessConfigurationMetadata$CreatedByArn": "

The Amazon Resource Name that created or last modified the configuration.

" + "AddInstanceFleetOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", + "AddInstanceGroupsOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", + "BlockPublicAccessConfigurationMetadata$CreatedByArn": "

The Amazon Resource Name that created or last modified the configuration.

", + "Cluster$ClusterArn": "

The Amazon Resource Name of the cluster.

", + "ClusterSummary$ClusterArn": "

The Amazon Resource Name of the cluster.

", + "PutAutoScalingPolicyOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

", + "RunJobFlowOutput$ClusterArn": "

The Amazon Resource Name of the cluster.

" } }, "AutoScalingPolicy": { @@ -163,15 +169,15 @@ "BlockPublicAccessConfiguration$BlockPublicSecurityGroupRules": "

Indicates whether EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

", "Cluster$AutoTerminate": "

Specifies whether the cluster should terminate after completing all steps.

", "Cluster$TerminationProtected": "

Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

", - "Cluster$VisibleToAllUsers": "

This member will be deprecated.

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and manage the cluster if they have the proper policy permissions set. If this value is false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

", - "JobFlowDetail$VisibleToAllUsers": "

This member will be deprecated.

Specifies whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers action.

", + "Cluster$VisibleToAllUsers": "

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

", + "JobFlowDetail$VisibleToAllUsers": "

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

", "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps.

", "JobFlowInstancesConfig$TerminationProtected": "

Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.

", "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps.

", "JobFlowInstancesDetail$TerminationProtected": "

Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.

", - "RunJobFlowInput$VisibleToAllUsers": "

This member will be deprecated.

Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true, all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false, only the IAM user that created the cluster can view and manage it.

", + "RunJobFlowInput$VisibleToAllUsers": "

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

", "SetTerminationProtectionInput$TerminationProtected": "

A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.

", - "SetVisibleToAllUsersInput$VisibleToAllUsers": "

This member will be deprecated.

Whether the specified clusters are visible to all IAM users of the AWS account associated with the cluster. If this value is set to True, all IAM users of that AWS account can view and, if they have the proper IAM policy permissions set, manage the clusters. If it is set to False, only the IAM user that created a cluster can view and manage it.

" + "SetVisibleToAllUsersInput$VisibleToAllUsers": "

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

" } }, "BooleanObject": { @@ -872,7 +878,7 @@ "Integer": { "base": null, "refs": { - "CloudWatchAlarmDefinition$EvaluationPeriods": "

The number of periods, expressed in seconds using Period, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1.

", + "CloudWatchAlarmDefinition$EvaluationPeriods": "

The number of periods, in five-minute increments, during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1.

", "CloudWatchAlarmDefinition$Period": "

The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300.

", "Cluster$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is incremented one time for every hour an m1.small instance runs. Larger instances are weighted more, so an EC2 instance that is roughly four times more expensive would result in the normalized instance hours being incremented by four. This result is only an approximation and does not reflect the actual billing rate.

", "Cluster$EbsRootVolumeSize": "

The size, in GiB, of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", @@ -1278,7 +1284,7 @@ } }, "SetVisibleToAllUsersInput": { - "base": "

This member will be deprecated.

The input to the SetVisibleToAllUsers action.

", + "base": "

The input to the SetVisibleToAllUsers action.

", "refs": { } }, @@ -1468,7 +1474,7 @@ "Instance$InstanceGroupId": "

The identifier of the instance group to which this instance belongs.

", "InstanceFleetStateChangeReason$Message": "

An explanatory message.

", "InstanceGroup$Name": "

The name of the instance group.

", - "InstanceGroup$BidPrice": "

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specify the maximum spot price in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

", + "InstanceGroup$BidPrice": "

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceGroupStateChangeReason$Message": "

The status change reason description.

", "InstanceStateChangeReason$Message": "

The status change reason description.

", "MetricDimension$Key": "

The dimension name.

", @@ -1607,10 +1613,10 @@ "refs": { "DescribeJobFlowsInput$JobFlowIds": "

Return only job flows whose job flow ID is contained in this list.

", "HadoopJarStepConfig$Args": "

A list of command line arguments passed to the JAR file's main function when executed.

", - "ListStepsInput$StepIds": "

The filter to limit the step list based on the identifier of the steps.

", + "ListStepsInput$StepIds": "

The filter to limit the step list based on the identifier of the steps. You can specify a maximum of ten Step IDs. The character constraint applies to the overall length of the array.

", "ScriptBootstrapActionConfig$Args": "

A list of command line arguments to pass to the bootstrap action script.

", "SetTerminationProtectionInput$JobFlowIds": "

A list of strings that uniquely identify the clusters to protect. This identifier is returned by RunJobFlow and can also be obtained from DescribeJobFlows .

", - "SetVisibleToAllUsersInput$JobFlowIds": "

Identifiers of the job flows to receive the new visibility setting.

", + "SetVisibleToAllUsersInput$JobFlowIds": "

The unique identifier of the job flow (cluster).

", "SupportedProductConfig$Args": "

The list of user-supplied arguments.

", "TerminateJobFlowsInput$JobFlowIds": "

A list of job flows to be shut down.

" } @@ -1629,10 +1635,10 @@ "InstanceFleet$Name": "

A friendly name for the instance fleet.

", "InstanceFleetConfig$Name": "

The friendly name of the instance fleet.

", "InstanceGroupConfig$Name": "

Friendly name given to the instance group.

", - "InstanceGroupConfig$BidPrice": "

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specify the maximum spot price in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

", + "InstanceGroupConfig$BidPrice": "

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceGroupDetail$InstanceGroupId": "

Unique identifier for the instance group.

", "InstanceGroupDetail$Name": "

Friendly name for the instance group.

", - "InstanceGroupDetail$BidPrice": "

The maximum Spot price you are willing to pay for EC2 instances.

An optional, nullable field that applies if the MarketType for the instance group is specified as SPOT. Specified in USD. If the value is NULL and SPOT is specified, the maximum Spot price is set equal to the On-Demand price.

", + "InstanceGroupDetail$BidPrice": "

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", "InstanceGroupIdsList$member": null, "InstanceGroupModifyConfig$InstanceGroupId": "

Unique ID of the instance group to expand or shrink.

", "InstanceTypeConfig$BidPrice": "

The bid price for each EC2 Spot instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.

", diff --git a/models/apis/firehose/2015-08-04/api-2.json b/models/apis/firehose/2015-08-04/api-2.json index 15a2f0b01dd..15be5f99c08 100644 --- a/models/apis/firehose/2015-08-04/api-2.json +++ b/models/apis/firehose/2015-08-04/api-2.json @@ -24,7 +24,8 @@ "errors":[ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"}, - {"shape":"ResourceInUseException"} + {"shape":"ResourceInUseException"}, + {"shape":"InvalidKMSResourceException"} ] }, "DeleteDeliveryStream":{ @@ -86,6 +87,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, + {"shape":"InvalidKMSResourceException"}, {"shape":"ServiceUnavailableException"} ] }, @@ -100,6 +102,7 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, + {"shape":"InvalidKMSResourceException"}, {"shape":"ServiceUnavailableException"} ] }, @@ -115,7 +118,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ResourceInUseException"}, {"shape":"InvalidArgumentException"}, - {"shape":"LimitExceededException"} + {"shape":"LimitExceededException"}, + {"shape":"InvalidKMSResourceException"} ] }, "StopDeliveryStreamEncryption":{ @@ -255,6 +259,7 @@ "DeliveryStreamName":{"shape":"DeliveryStreamName"}, "DeliveryStreamType":{"shape":"DeliveryStreamType"}, "KinesisStreamSourceConfiguration":{"shape":"KinesisStreamSourceConfiguration"}, + "DeliveryStreamEncryptionConfigurationInput":{"shape":"DeliveryStreamEncryptionConfigurationInput"}, "S3DestinationConfiguration":{ "shape":"S3DestinationConfiguration", "deprecated":true @@ -295,7 +300,8 @@ "type":"structure", "required":["DeliveryStreamName"], "members":{ - "DeliveryStreamName":{"shape":"DeliveryStreamName"} + "DeliveryStreamName":{"shape":"DeliveryStreamName"}, + "AllowForceDelete":{"shape":"BooleanObject"} } }, "DeleteDeliveryStreamOutput":{ @@ -325,6 +331,7 @@ "DeliveryStreamName":{"shape":"DeliveryStreamName"}, "DeliveryStreamARN":{"shape":"DeliveryStreamARN"}, "DeliveryStreamStatus":{"shape":"DeliveryStreamStatus"}, + "FailureDescription":{"shape":"FailureDescription"}, "DeliveryStreamEncryptionConfiguration":{"shape":"DeliveryStreamEncryptionConfiguration"}, "DeliveryStreamType":{"shape":"DeliveryStreamType"}, "VersionId":{"shape":"DeliveryStreamVersionId"}, @@ -338,7 +345,18 @@ "DeliveryStreamEncryptionConfiguration":{ "type":"structure", "members":{ - "Status":{"shape":"DeliveryStreamEncryptionStatus"} + "KeyARN":{"shape":"AWSKMSKeyARN"}, + "KeyType":{"shape":"KeyType"}, + "Status":{"shape":"DeliveryStreamEncryptionStatus"}, + "FailureDescription":{"shape":"FailureDescription"} + } + }, + "DeliveryStreamEncryptionConfigurationInput":{ + "type":"structure", + "required":["KeyType"], + "members":{ + "KeyARN":{"shape":"AWSKMSKeyARN"}, + "KeyType":{"shape":"KeyType"} } }, "DeliveryStreamEncryptionStatus":{ @@ -346,8 +364,23 @@ "enum":[ "ENABLED", "ENABLING", + "ENABLING_FAILED", "DISABLED", - "DISABLING" + "DISABLING", + "DISABLING_FAILED" + ] + }, + "DeliveryStreamFailureType":{ + "type":"string", + "enum":[ + "RETIRE_KMS_GRANT_FAILED", + "CREATE_KMS_GRANT_FAILED", + "KMS_ACCESS_DENIED", + "DISABLED_KMS_KEY", + "INVALID_KMS_KEY", + "KMS_KEY_NOT_FOUND", + "KMS_OPT_IN_REQUIRED", + "UNKNOWN_ERROR" ] }, "DeliveryStreamName":{ @@ -364,7 +397,9 @@ "type":"string", "enum":[ "CREATING", + "CREATING_FAILED", "DELETING", + "DELETING_FAILED", "ACTIVE" ] }, @@ -624,6 +659,17 @@ "DataFormatConversionConfiguration":{"shape":"DataFormatConversionConfiguration"} } }, + "FailureDescription":{ + "type":"structure", + "required":[ + 
"Type", + "Details" + ], + "members":{ + "Type":{"shape":"DeliveryStreamFailureType"}, + "Details":{"shape":"NonEmptyString"} + } + }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -662,6 +708,14 @@ }, "exception":true }, + "InvalidKMSResourceException":{ + "type":"structure", + "members":{ + "code":{"shape":"ErrorCode"}, + "message":{"shape":"ErrorMessage"} + }, + "exception":true + }, "KMSEncryptionConfig":{ "type":"structure", "required":["AWSKMSKeyARN"], @@ -669,6 +723,13 @@ "AWSKMSKeyARN":{"shape":"AWSKMSKeyARN"} } }, + "KeyType":{ + "type":"string", + "enum":[ + "AWS_OWNED_CMK", + "CUSTOMER_MANAGED_CMK" + ] + }, "KinesisStreamARN":{ "type":"string", "max":512, @@ -1265,7 +1326,8 @@ "type":"structure", "required":["DeliveryStreamName"], "members":{ - "DeliveryStreamName":{"shape":"DeliveryStreamName"} + "DeliveryStreamName":{"shape":"DeliveryStreamName"}, + "DeliveryStreamEncryptionConfigurationInput":{"shape":"DeliveryStreamEncryptionConfigurationInput"} } }, "StartDeliveryStreamEncryptionOutput":{ diff --git a/models/apis/firehose/2015-08-04/docs-2.json b/models/apis/firehose/2015-08-04/docs-2.json index d63d6f988c9..cc8157ddb6c 100644 --- a/models/apis/firehose/2015-08-04/docs-2.json +++ b/models/apis/firehose/2015-08-04/docs-2.json @@ -2,15 +2,15 @@ "version": "2.0", "service": "Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk.

", "operations": { - "CreateDeliveryStream": "

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per AWS Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

", - "DeleteDeliveryStream": "

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in ACTIVE or DELETING state, and not in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

To check the state of a delivery stream, use DescribeDeliveryStream.

While the delivery stream is DELETING state, the service might continue to accept the records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, you should first stop any applications that are sending records before deleting a delivery stream.

", - "DescribeDeliveryStream": "

Describes the specified delivery stream and gets the status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

", + "CreateDeliveryStream": "

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per AWS Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

", + "DeleteDeliveryStream": "

Deletes a delivery stream and its data.

To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

", + "DescribeDeliveryStream": "

Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.

", "ListDeliveryStreams": "

Lists your delivery streams in alphabetical order of their names.

The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call.

", "ListTagsForDeliveryStream": "

Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account.

", "PutRecord": "

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", "PutRecordBatch": "

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits, see Amazon Kinesis Data Firehose Limits.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, back off and retry. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

", - "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to ENABLING, and then to ENABLED. You can continue to read and write data to your stream while its status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

You can only enable SSE for a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", - "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", + "StartDeliveryStreamEncryption": "

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. In this case, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement and creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again.

You can only enable SSE for a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", + "StopDeliveryStreamEncryption": "

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

", "TagDeliveryStream": "

Adds or updates tags for the specified delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

Each delivery stream can have up to 50 tags.

This operation has a limit of five transactions per second per account.

", "UntagDeliveryStream": "

Removes tags from the specified delivery stream. Removed tags are deleted, and you can't recover them after this operation successfully completes.

If you specify a tag that doesn't exist, the operation ignores it.

This operation has a limit of five transactions per second per account.

", "UpdateDestination": "

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination.

If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" @@ -19,6 +19,8 @@ "AWSKMSKeyARN": { "base": null, "refs": { + "DeliveryStreamEncryptionConfiguration$KeyARN": "

If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration doesn't contain a value for KeyARN.

", + "DeliveryStreamEncryptionConfigurationInput$KeyARN": "

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

", "KMSEncryptionConfig$AWSKMSKeyARN": "

The Amazon Resource Name (ARN) of the encryption key. Must belong to the same AWS Region as the destination Amazon S3 bucket. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

" } }, @@ -34,6 +36,7 @@ "refs": { "CloudWatchLoggingOptions$Enabled": "

Enables or disables CloudWatch logging.

", "DataFormatConversionConfiguration$Enabled": "

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

", + "DeleteDeliveryStreamInput$AllowForceDelete": "

Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

The default value is false.

", "DeliveryStreamDescription$HasMoreDestinations": "

Indicates whether there are more destinations available to list.

", "ListDeliveryStreamsOutput$HasMoreDeliveryStreams": "

Indicates whether there are more delivery streams available to list.

", "ListTagsForDeliveryStreamOutput$HasMoreTags": "

If this is true in the response, more tags are available. To list the remaining tags, set ExclusiveStartTagKey to the key of the last tag returned and call ListTagsForDeliveryStream again.

", @@ -198,15 +201,28 @@ } }, "DeliveryStreamEncryptionConfiguration": { - "base": "

Indicates the server-side encryption (SSE) status for the delivery stream.

", + "base": "

Contains information about the server-side encryption (SSE) status for the delivery stream, the type of customer master key (CMK) in use, if any, and the ARN of the CMK. You can get DeliveryStreamEncryptionConfiguration by invoking the DescribeDeliveryStream operation.

", "refs": { "DeliveryStreamDescription$DeliveryStreamEncryptionConfiguration": "

Indicates the server-side encryption (SSE) status for the delivery stream.

" } }, + "DeliveryStreamEncryptionConfigurationInput": { + "base": "

Used to specify the type and Amazon Resource Name (ARN) of the CMK needed for Server-Side Encryption (SSE).

", + "refs": { + "CreateDeliveryStreamInput$DeliveryStreamEncryptionConfigurationInput": "

Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).

", + "StartDeliveryStreamEncryptionInput$DeliveryStreamEncryptionConfigurationInput": "

Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed for Server-Side Encryption (SSE).

" + } + }, "DeliveryStreamEncryptionStatus": { "base": null, "refs": { - "DeliveryStreamEncryptionConfiguration$Status": "

For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption.

" + "DeliveryStreamEncryptionConfiguration$Status": "

This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively.

" + } + }, + "DeliveryStreamFailureType": { + "base": null, + "refs": { + "FailureDescription$Type": "

The type of error that caused the failure.

" } }, "DeliveryStreamName": { @@ -237,7 +253,7 @@ "DeliveryStreamStatus": { "base": null, "refs": { - "DeliveryStreamDescription$DeliveryStreamStatus": "

The status of the delivery stream.

" + "DeliveryStreamDescription$DeliveryStreamStatus": "

The status of the delivery stream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

" } }, "DeliveryStreamType": { @@ -410,6 +426,7 @@ "ErrorCode": { "base": null, "refs": { + "InvalidKMSResourceException$code": null, "PutRecordBatchResponseEntry$ErrorCode": "

The error code for an individual record result.

" } }, @@ -418,6 +435,7 @@ "refs": { "ConcurrentModificationException$message": "

A message that provides information about the error.

", "InvalidArgumentException$message": "

A message that provides information about the error.

", + "InvalidKMSResourceException$message": null, "LimitExceededException$message": "

A message that provides information about the error.

", "PutRecordBatchResponseEntry$ErrorMessage": "

The error message for an individual record result.

", "ResourceInUseException$message": "

A message that provides information about the error.

", @@ -454,6 +472,13 @@ "UpdateDestinationInput$ExtendedS3DestinationUpdate": "

Describes an update for a destination in Amazon S3.

" } }, + "FailureDescription": { + "base": "

Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

", + "refs": { + "DeliveryStreamDescription$FailureDescription": "

Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

", + "DeliveryStreamEncryptionConfiguration$FailureDescription": "

Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

" + } + }, "HECAcknowledgmentTimeoutInSeconds": { "base": null, "refs": { @@ -509,12 +534,24 @@ "refs": { } }, + "InvalidKMSResourceException": { + "base": "

Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

", + "refs": { + } + }, "KMSEncryptionConfig": { "base": "

Describes an encryption key for a destination in Amazon S3.

", "refs": { "EncryptionConfiguration$KMSEncryptionConfig": "

The encryption key.

" } }, + "KeyType": { + "base": null, + "refs": { + "DeliveryStreamEncryptionConfiguration$KeyType": "

Indicates the type of customer master key (CMK) that is used for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs).

", + "DeliveryStreamEncryptionConfigurationInput$KeyType": "

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is already encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.
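A hedged sketch of how the new input might be used from the generated Go client follows; the KeyTypeCustomerManagedCmk constant and the KeyARN field name are assumed to be what the code generator derives from this model, and the stream name and key ARN are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/firehose"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := firehose.New(cfg)

	// Switch the stream's SSE key to a customer managed CMK. Per the model docs,
	// Kinesis Data Firehose creates a KMS grant on this key and manages it.
	req := svc.StartDeliveryStreamEncryptionRequest(&firehose.StartDeliveryStreamEncryptionInput{
		DeliveryStreamName: aws.String("example-stream"), // placeholder
		DeliveryStreamEncryptionConfigurationInput: &firehose.DeliveryStreamEncryptionConfigurationInput{
			KeyType: firehose.KeyTypeCustomerManagedCmk,                           // assumed generated constant for CUSTOMER_MANAGED_CMK
			KeyARN:  aws.String("arn:aws:kms:us-east-1:111122223333:key/example"), // placeholder ARN
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```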

" + } + }, "KinesisStreamARN": { "base": null, "refs": { @@ -611,6 +648,7 @@ "base": null, "refs": { "ColumnToJsonKeyMappings$value": null, + "FailureDescription$Details": "

A message providing details about the error that caused the failure.

", "ListOfNonEmptyStrings$member": null } }, @@ -679,7 +717,7 @@ "ParquetCompression": { "base": null, "refs": { - "ParquetSerDe$Compression": "

The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ration is more important than speed.

" + "ParquetSerDe$Compression": "

The compression code to use over data blocks. The possible values are UNCOMPRESSED, SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression speed. Use GZIP if the compression ratio is more important than speed.

" } }, "ParquetPageSizeBytes": { diff --git a/models/apis/fsx/2018-03-01/api-2.json b/models/apis/fsx/2018-03-01/api-2.json index d7006eb0365..5e3abf74bfe 100644 --- a/models/apis/fsx/2018-03-01/api-2.json +++ b/models/apis/fsx/2018-03-01/api-2.json @@ -427,6 +427,8 @@ "members":{ "ActiveDirectoryId":{"shape":"DirectoryId"}, "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryConfiguration"}, + "DeploymentType":{"shape":"WindowsDeploymentType"}, + "PreferredSubnetId":{"shape":"SubnetId"}, "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, "DailyAutomaticBackupStartTime":{"shape":"DailyTime"}, @@ -1033,11 +1035,22 @@ "min":7, "pattern":"^[1-7]:([01]\\d|2[0-3]):?([0-5]\\d)$" }, + "WindowsDeploymentType":{ + "type":"string", + "enum":[ + "MULTI_AZ_1", + "SINGLE_AZ_1" + ] + }, "WindowsFileSystemConfiguration":{ "type":"structure", "members":{ "ActiveDirectoryId":{"shape":"DirectoryId"}, "SelfManagedActiveDirectoryConfiguration":{"shape":"SelfManagedActiveDirectoryAttributes"}, + "DeploymentType":{"shape":"WindowsDeploymentType"}, + "RemoteAdministrationEndpoint":{"shape":"DNSName"}, + "PreferredSubnetId":{"shape":"SubnetId"}, + "PreferredFileServerIp":{"shape":"IpAddress"}, "ThroughputCapacity":{"shape":"MegabytesPerSecond"}, "MaintenanceOperationsInProgress":{"shape":"FileSystemMaintenanceOperations"}, "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, diff --git a/models/apis/fsx/2018-03-01/docs-2.json b/models/apis/fsx/2018-03-01/docs-2.json index 5f683ca65de..cd38e702893 100644 --- a/models/apis/fsx/2018-03-01/docs-2.json +++ b/models/apis/fsx/2018-03-01/docs-2.json @@ -196,7 +196,8 @@ "DNSName": { "base": "

The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.

", "refs": { - "FileSystem$DNSName": "

The DNS name for the file system.

" + "FileSystem$DNSName": "

The DNS name for the file system.

", + "WindowsFileSystemConfiguration$RemoteAdministrationEndpoint": "

For MULTI_AZ_1 deployment types, use this endpoint when performing administrative tasks on the file system using Amazon FSx Remote PowerShell.

For SINGLE_AZ_1 deployment types, this is the DNS name of the file system.

This endpoint is temporarily unavailable when the file system is undergoing maintenance.

" } }, "DailyTime": { @@ -335,7 +336,7 @@ "base": null, "refs": { "SelfManagedActiveDirectoryAttributes$FileSystemAdministratorsGroup": "

The name of the domain group whose members have administrative privileges for the FSx file system.

", - "SelfManagedActiveDirectoryConfiguration$FileSystemAdministratorsGroup": "

(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, and setting audit controls (audit ACLs) on files and folders. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.

" + "SelfManagedActiveDirectoryConfiguration$FileSystemAdministratorsGroup": "

(Optional) The name of the domain group whose members are granted administrative privileges for the file system. Administrative privileges include taking ownership of files and folders, setting audit controls (audit ACLs) on files and folders, and administering the file system remotely by using the FSx Remote PowerShell. The group that you specify must already exist in your domain. If you don't provide one, your AD domain's Domain Admins group is used.

" } }, "FileSystemFailureDetails": { @@ -366,7 +367,7 @@ "base": "

The lifecycle status of the file system.

", "refs": { "DeleteFileSystemResponse$Lifecycle": "

The file system lifecycle for the deletion request. Should be DELETING.

", - "FileSystem$Lifecycle": "

The lifecycle status of the file system:

" + "FileSystem$Lifecycle": "

The lifecycle status of the file system. The following are the possible values and what they mean:

" } }, "FileSystemMaintenanceOperation": { @@ -465,7 +466,8 @@ "IpAddress": { "base": null, "refs": { - "DnsIps$member": null + "DnsIps$member": null, + "WindowsFileSystemConfiguration$PreferredFileServerIp": "

For MULTI_AZ_1 deployment types, the IP address of the primary, or preferred, file server.

Use this IP address when mounting the file system on Linux SMB clients or Windows SMB clients that are not joined to a Microsoft Active Directory. Applicable for both SINGLE_AZ_1 and MULTI_AZ_1 deployment types. This IP address is temporarily unavailable when the file system is undergoing maintenance. For Linux and Windows SMB clients that are joined to an Active Directory, use the file system's DNSName instead. For more information and instructions on mapping and mounting file shares, see https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html.

" } }, "KmsKeyId": { @@ -635,22 +637,24 @@ "StorageCapacity": { "base": "

The storage capacity for your Amazon FSx file system, in gibibytes.

", "refs": { - "CreateFileSystemRequest$StorageCapacity": "

The storage capacity of the file system being created.

For Windows file systems, the storage capacity has a minimum of 300 GiB, and a maximum of 65,536 GiB.

For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. Storage capacity is provisioned in increments of 3,600 GiB.

", + "CreateFileSystemRequest$StorageCapacity": "

The storage capacity of the file system being created.

For Windows file systems, valid values are 32 GiB - 65,536 GiB.

For Lustre file systems, valid values are 1,200, 2,400, 3,600, then continuing in increments of 3,600 GiB.

", "FileSystem$StorageCapacity": "

The storage capacity of the file system in gigabytes (GB).

" } }, "SubnetId": { "base": "

The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and Subnets in the Amazon VPC User Guide.

", "refs": { + "CreateFileSystemWindowsConfiguration$PreferredSubnetId": "

Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located. For in-AWS applications, we recommend that you launch your clients in the same Availability Zone (AZ) as your preferred file server to reduce cross-AZ data transfer costs and minimize latency.

", "InvalidNetworkSettings$InvalidSubnetId": null, - "SubnetIds$member": null + "SubnetIds$member": null, + "WindowsFileSystemConfiguration$PreferredSubnetId": "

For MULTI_AZ_1 deployment types, it specifies the ID of the subnet where the preferred file server is located. Must be one of the two subnet IDs specified in the SubnetIds property. Amazon FSx serves traffic from this subnet except in the event of a failover to the secondary file server.

For SINGLE_AZ_1 deployment types, this value is the same as that for SubnetIDs.

" } }, "SubnetIds": { "base": "

A list of subnet IDs. Currently, you can specify only one subnet ID in a call to the CreateFileSystem operation.

", "refs": { "CreateFileSystemFromBackupRequest$SubnetIds": "

A list of IDs for the subnets that the file system will be accessible from. Currently, you can specify only one subnet. The file server is also launched in that subnet's Availability Zone.

", - "CreateFileSystemRequest$SubnetIds": "

The IDs of the subnets that the file system will be accessible from. File systems support only one subnet. The file server is also launched in that subnet's Availability Zone.

", + "CreateFileSystemRequest$SubnetIds": "

Specifies the IDs of the subnets that the file system will be accessible from. For Windows MULTI_AZ_1 file system deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID property.

For Windows SINGLE_AZ_1 file system deployment types and Lustre file systems, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.
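As a rough illustration of how the new Multi-AZ parameters fit together, a minimal Go sketch follows; the FileSystemTypeWindows and WindowsDeploymentTypeMultiAz1 constants are assumed generator-derived names, and the subnet IDs, storage capacity, and throughput are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := fsx.New(cfg)

	// Multi-AZ Windows file system: exactly two subnets, one of which is
	// named as the preferred subnet for the primary file server.
	req := svc.CreateFileSystemRequest(&fsx.CreateFileSystemInput{
		FileSystemType:  fsx.FileSystemTypeWindows, // assumed generated constant for WINDOWS
		StorageCapacity: aws.Int64(300),
		SubnetIds:       []string{"subnet-1111aaaa", "subnet-2222bbbb"}, // placeholder IDs
		WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{
			DeploymentType:     fsx.WindowsDeploymentTypeMultiAz1, // assumed generated constant for MULTI_AZ_1
			PreferredSubnetId:  aws.String("subnet-1111aaaa"),     // must be one of SubnetIds
			ThroughputCapacity: aws.Int64(8),
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```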

", "FileSystem$SubnetIds": "

The ID of the subnet to contain the endpoint for the file system. One and only one is supported. The file system is launched in the Availability Zone associated with this subnet.

" } }, @@ -757,6 +761,13 @@ "WindowsFileSystemConfiguration$WeeklyMaintenanceStartTime": "

The preferred time to perform weekly maintenance, in the UTC time zone.

" } }, + "WindowsDeploymentType": { + "base": null, + "refs": { + "CreateFileSystemWindowsConfiguration$DeploymentType": "

Specifies the file system deployment type. Valid values are the following:

To learn more about high availability Multi-AZ file systems, see High Availability for Amazon FSx for Windows File Server.

", + "WindowsFileSystemConfiguration$DeploymentType": "

Specifies the file system deployment type. Valid values are the following:

" + } + }, "WindowsFileSystemConfiguration": { "base": "

The configuration for this Microsoft Windows file system.

", "refs": { diff --git a/models/apis/guardduty/2017-11-28/api-2.json b/models/apis/guardduty/2017-11-28/api-2.json index cb415437b3e..871c59b60c7 100644 --- a/models/apis/guardduty/2017-11-28/api-2.json +++ b/models/apis/guardduty/2017-11-28/api-2.json @@ -96,6 +96,20 @@ {"shape":"InternalServerErrorException"} ] }, + "CreatePublishingDestination":{ + "name":"CreatePublishingDestination", + "http":{ + "method":"POST", + "requestUri":"/detector/{detectorId}/publishingDestination", + "responseCode":200 + }, + "input":{"shape":"CreatePublishingDestinationRequest"}, + "output":{"shape":"CreatePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "CreateSampleFindings":{ "name":"CreateSampleFindings", "http":{ @@ -208,6 +222,20 @@ {"shape":"InternalServerErrorException"} ] }, + "DeletePublishingDestination":{ + "name":"DeletePublishingDestination", + "http":{ + "method":"DELETE", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"DeletePublishingDestinationRequest"}, + "output":{"shape":"DeletePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "DeleteThreatIntelSet":{ "name":"DeleteThreatIntelSet", "http":{ @@ -222,6 +250,20 @@ {"shape":"InternalServerErrorException"} ] }, + "DescribePublishingDestination":{ + "name":"DescribePublishingDestination", + "http":{ + "method":"GET", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"DescribePublishingDestinationRequest"}, + "output":{"shape":"DescribePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "DisassociateFromMasterAccount":{ "name":"DisassociateFromMasterAccount", "http":{ @@ -474,6 +516,20 @@ {"shape":"InternalServerErrorException"} ] }, + "ListPublishingDestinations":{ + "name":"ListPublishingDestinations", + "http":{ + "method":"GET", + "requestUri":"/detector/{detectorId}/publishingDestination", + "responseCode":200 + }, + "input":{"shape":"ListPublishingDestinationsRequest"}, + "output":{"shape":"ListPublishingDestinationsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -628,6 +684,20 @@ {"shape":"InternalServerErrorException"} ] }, + "UpdatePublishingDestination":{ + "name":"UpdatePublishingDestination", + "http":{ + "method":"POST", + "requestUri":"/detector/{detectorId}/publishingDestination/{destinationId}", + "responseCode":200 + }, + "input":{"shape":"UpdatePublishingDestinationRequest"}, + "output":{"shape":"UpdatePublishingDestinationResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"InternalServerErrorException"} + ] + }, "UpdateThreatIntelSet":{ "name":"UpdateThreatIntelSet", "http":{ @@ -1073,6 +1143,44 @@ } } }, + "CreatePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationType", + "DestinationProperties" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + }, + "ClientToken":{ + 
"shape":"ClientToken", + "idempotencyToken":true, + "locationName":"clientToken" + } + } + }, + "CreatePublishingDestinationResponse":{ + "type":"structure", + "required":["DestinationId"], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + } + } + }, "CreateSampleFindingsRequest":{ "type":"structure", "required":["DetectorId"], @@ -1282,6 +1390,30 @@ } } }, + "DeletePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + } + } + }, + "DeletePublishingDestinationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteThreatIntelSetRequest":{ "type":"structure", "required":[ @@ -1306,6 +1438,102 @@ "members":{ } }, + "DescribePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + } + } + }, + "DescribePublishingDestinationResponse":{ + "type":"structure", + "required":[ + "DestinationId", + "DestinationType", + "Status", + "PublishingFailureStartTimestamp", + "DestinationProperties" + ], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "Status":{ + "shape":"PublishingStatus", + "locationName":"status" + }, + "PublishingFailureStartTimestamp":{ + "shape":"Long", + "locationName":"publishingFailureStartTimestamp" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + } + } + }, + "Destination":{ + "type":"structure", + "required":[ + "DestinationId", + "DestinationType", + "Status" + ], + "members":{ + "DestinationId":{ + "shape":"String", + "locationName":"destinationId" + }, + "DestinationType":{ + "shape":"DestinationType", + "locationName":"destinationType" + }, + "Status":{ + "shape":"PublishingStatus", + "locationName":"status" + } + } + }, + "DestinationProperties":{ + "type":"structure", + "members":{ + "DestinationArn":{ + "shape":"String", + "locationName":"destinationArn" + }, + "KmsKeyArn":{ + "shape":"String", + "locationName":"kmsKeyArn" + } + } + }, + "DestinationType":{ + "type":"string", + "enum":["S3"], + "max":300, + "min":1 + }, + "Destinations":{ + "type":"list", + "member":{"shape":"Destination"} + }, "DetectorId":{ "type":"string", "max":300, @@ -2326,6 +2554,41 @@ } } }, + "ListPublishingDestinationsRequest":{ + "type":"structure", + "required":["DetectorId"], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "MaxResults":{ + "shape":"MaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPublishingDestinationsResponse":{ + "type":"structure", + "required":["Destinations"], + "members":{ + "Destinations":{ + "shape":"Destinations", + "locationName":"destinations" + }, + "NextToken":{ + "shape":"String", + "locationName":"nextToken" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -2655,6 
+2918,17 @@ "type":"list", "member":{"shape":"ProductCode"} }, + "PublishingStatus":{ + "type":"string", + "enum":[ + "PENDING_VERIFICATION", + "PUBLISHING", + "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY", + "STOPPED" + ], + "max":300, + "min":1 + }, "RemoteIpDetails":{ "type":"structure", "members":{ @@ -3166,6 +3440,34 @@ "members":{ } }, + "UpdatePublishingDestinationRequest":{ + "type":"structure", + "required":[ + "DetectorId", + "DestinationId" + ], + "members":{ + "DetectorId":{ + "shape":"DetectorId", + "location":"uri", + "locationName":"detectorId" + }, + "DestinationId":{ + "shape":"String", + "location":"uri", + "locationName":"destinationId" + }, + "DestinationProperties":{ + "shape":"DestinationProperties", + "locationName":"destinationProperties" + } + } + }, + "UpdatePublishingDestinationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateThreatIntelSetRequest":{ "type":"structure", "required":[ diff --git a/models/apis/guardduty/2017-11-28/docs-2.json b/models/apis/guardduty/2017-11-28/docs-2.json index 644fa23771b..c9edd87f752 100644 --- a/models/apis/guardduty/2017-11-28/docs-2.json +++ b/models/apis/guardduty/2017-11-28/docs-2.json @@ -1,29 +1,32 @@ { "version": "2.0", - "service": "

Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IPs and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a region that has never been used, or unusual API calls, like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see Amazon GuardDuty User Guide.

", + "service": "

Amazon GuardDuty is a continuous security monitoring service that analyzes and processes the following data sources: VPC Flow Logs, AWS CloudTrail event logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious IPs and domains, and machine learning to identify unexpected and potentially unauthorized and malicious activity within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a region that has never been used, or unusual API calls, like a password policy change to reduce password strength. GuardDuty informs you of the status of your AWS environment by producing security findings that you can view in the GuardDuty console or through Amazon CloudWatch events. For more information, see Amazon GuardDuty User Guide.

", "operations": { "AcceptInvitation": "

Accepts the invitation to be monitored by a master GuardDuty account.

", "ArchiveFindings": "

Archives GuardDuty findings specified by the list of finding IDs.

Only the master account can archive findings. Member accounts do not have permission to archive findings from their accounts.

", "CreateDetector": "

Creates a single Amazon GuardDuty detector. A detector is a resource that represents the GuardDuty service. To start using GuardDuty, you must create a detector in each region that you enable the service. You can have only one detector per account per region.

", "CreateFilter": "

Creates a filter using the specified finding criteria.

", - "CreateIPSet": "

Creates a new IPSet - a list of trusted IP addresses that have been whitelisted for secure communication with AWS infrastructure and applications.

", + "CreateIPSet": "

Creates a new IPSet, called a Trusted IP list in the console user interface. An IPSet is a list of IP addresses trusted for secure communication with AWS infrastructure and applications. GuardDuty does not generate findings for IP addresses included in IPSets. Only users from the master account can use this operation.

", "CreateMembers": "

Creates member accounts of the current AWS account by specifying a list of AWS account IDs. The current AWS account can then invite these members to manage GuardDuty in their accounts.

", - "CreateSampleFindings": "

Generates example findings of types specified by the list of finding types. If 'NULL' is specified for findingTypes, the API generates example findings of all supported finding types.

", - "CreateThreatIntelSet": "

Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets.

", + "CreatePublishingDestination": "

Creates a publishing destination to send findings to. The resource to send findings to must exist before you use this operation.

", + "CreateSampleFindings": "

Generates example findings of types specified by the list of finding types. If 'NULL' is specified for findingTypes, the API generates example findings of all supported finding types.

", + "CreateThreatIntelSet": "

Creates a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP addresses. GuardDuty generates findings based on ThreatIntelSets. Only users of the master account can use this operation.

", "DeclineInvitations": "

Declines invitations sent to the current member account by AWS account specified by their account IDs.

", "DeleteDetector": "

Deletes an Amazon GuardDuty detector specified by the detector ID.

", "DeleteFilter": "

Deletes the filter specified by the filter name.

", - "DeleteIPSet": "

Deletes the IPSet specified by the IPSet ID.

", + "DeleteIPSet": "

Deletes the IPSet specified by the ipSetId. IPSets are called Trusted IP lists in the console user interface.

", "DeleteInvitations": "

Deletes invitations sent to the current member account by AWS accounts specified by their account IDs.

", "DeleteMembers": "

Deletes GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.

", + "DeletePublishingDestination": "

Deletes the publishing destination with the specified destinationId.

", "DeleteThreatIntelSet": "

Deletes ThreatIntelSet specified by the ThreatIntelSet ID.

", + "DescribePublishingDestination": "

Returns information about the publishing destination specified by the provided destinationId.

", "DisassociateFromMasterAccount": "

Disassociates the current GuardDuty member account from its master account.

", "DisassociateMembers": "

Disassociates GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.

", "GetDetector": "

Retrieves an Amazon GuardDuty detector specified by the detectorId.

", "GetFilter": "

Returns the details of the filter specified by the filter name.

", "GetFindings": "

Describes Amazon GuardDuty findings specified by finding IDs.

", "GetFindingsStatistics": "

Lists Amazon GuardDuty findings' statistics for the specified detector ID.

", - "GetIPSet": "

Retrieves the IPSet specified by the IPSet ID.

", + "GetIPSet": "

Retrieves the IPSet specified by the ipSetId.

", "GetInvitationsCount": "

Returns the count of all GuardDuty membership invitations that were sent to the current member account except the currently accepted invitation.

", "GetMasterAccount": "

Provides the details for the GuardDuty master account associated with the current GuardDuty member account.

", "GetMembers": "

Retrieves GuardDuty member accounts (to the current GuardDuty master account) specified by the account IDs.

", @@ -32,20 +35,22 @@ "ListDetectors": "

Lists detectorIds of all the existing Amazon GuardDuty detector resources.

", "ListFilters": "

Returns a paginated list of the current filters.

", "ListFindings": "

Lists Amazon GuardDuty findings for the specified detector ID.

", - "ListIPSets": "

Lists the IPSets of the GuardDuty service specified by the detector ID.

", + "ListIPSets": "

Lists the IPSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the IPSets returned are the IPSets from the associated master account.

", "ListInvitations": "

Lists all GuardDuty membership invitations that were sent to the current AWS account.

", "ListMembers": "

Lists details about all member accounts for the current GuardDuty master account.

", + "ListPublishingDestinations": "

Returns a list of publishing destinations associated with the specified detectorId.

", "ListTagsForResource": "

Lists tags for a resource. Tagging is currently supported for detectors, finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags per resource. When invoked, this operation returns all assigned tags for a given resource.

", - "ListThreatIntelSets": "

Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID.

", - "StartMonitoringMembers": "

Re-enables GuardDuty to monitor findings of the member accounts specified by the account IDs. A master GuardDuty account can run this command after disabling GuardDuty from monitoring these members' findings by running StopMonitoringMembers.

", - "StopMonitoringMembers": "

Disables GuardDuty from monitoring findings of the member accounts specified by the account IDs. After running this command, a master GuardDuty account can run StartMonitoringMembers to re-enable GuardDuty to monitor these members’ findings.

", + "ListThreatIntelSets": "

Lists the ThreatIntelSets of the GuardDuty service specified by the detector ID. If you use this operation from a member account, the ThreatIntelSets associated with the master account are returned.

", + "StartMonitoringMembers": "

Turns on GuardDuty monitoring of the specified member accounts. Use this operation to restart monitoring of accounts that you stopped monitoring with the StopMonitoringMembers operation.

", + "StopMonitoringMembers": "

Stops GuardDuty monitoring for the specified member accounts. Use the StartMonitoringMembers operation to restart monitoring for those accounts.

", "TagResource": "

Adds tags to a resource.

", - "UnarchiveFindings": "

Unarchives Amazon GuardDuty findings specified by the list of finding IDs.

", + "UnarchiveFindings": "

Unarchives GuardDuty findings specified by the findingIds.

", "UntagResource": "

Removes tags from a resource.

", - "UpdateDetector": "

Updates an Amazon GuardDuty detector specified by the detectorId.

", + "UpdateDetector": "

Updates the Amazon GuardDuty detector specified by the detectorId.

", "UpdateFilter": "

Updates the filter specified by the filter name.

", - "UpdateFindingsFeedback": "

Marks specified Amazon GuardDuty findings as useful or not useful.

", + "UpdateFindingsFeedback": "

Marks the specified GuardDuty findings as useful or not useful.

", "UpdateIPSet": "

Updates the IPSet specified by the IPSet ID.

", + "UpdatePublishingDestination": "

Updates information about the publishing destination specified by the destinationId.

", "UpdateThreatIntelSet": "

Updates the ThreatIntelSet specified by ThreatIntelSet ID.

" }, "shapes": { @@ -82,7 +87,7 @@ "refs": { "AccountDetail$AccountId": "

Member account ID.

", "AccountIds$member": null, - "Invitation$AccountId": "

Inviter account ID

", + "Invitation$AccountId": "

The ID of the account from which the invitation was sent.

", "Master$AccountId": "

The ID of the account used as the Master account.

", "Member$AccountId": "

Member account ID.

", "UnprocessedAccount$AccountId": "

AWS Account ID.

" @@ -97,7 +102,7 @@ "DisassociateMembersRequest$AccountIds": "

A list of account IDs of the GuardDuty member accounts that you want to disassociate from master.

", "GetMembersRequest$AccountIds": "

A list of account IDs of the GuardDuty member accounts that you want to describe.

", "InviteMembersRequest$AccountIds": "

A list of account IDs of the accounts that you want to invite to GuardDuty as members.

", - "StartMonitoringMembersRequest$AccountIds": "

A list of account IDs of the GuardDuty member accounts whose findings you want the master account to monitor.

", + "StartMonitoringMembersRequest$AccountIds": "

A list of account IDs of the GuardDuty member accounts to start monitoring.

", "StopMonitoringMembersRequest$AccountIds": "

A list of account IDs of the GuardDuty member accounts whose findings you want the master account to stop monitoring.

" } }, @@ -138,7 +143,7 @@ "NetworkConnectionAction$Blocked": "

Network connection blocked information.

", "PortProbeAction$Blocked": "

Port probe blocked information.

", "Service$Archived": "

Indicates whether this finding is archived.

", - "UpdateDetectorRequest$Enable": "

Updated boolean value for the detector that specifies whether the detector is enabled.

", + "UpdateDetectorRequest$Enable": "

Specifies whether the detector is enabled or disabled.

", "UpdateIPSetRequest$Activate": "

The updated boolean value that specifies whether the IPSet is active or not.

", "UpdateThreatIntelSetRequest$Activate": "

The updated boolean value that specifies whether the ThreatIntelSet is active or not.

" } @@ -155,6 +160,7 @@ "CreateDetectorRequest$ClientToken": "

The idempotency token for the create request.

", "CreateFilterRequest$ClientToken": "

The idempotency token for the create request.

", "CreateIPSetRequest$ClientToken": "

The idempotency token for the create request.

", + "CreatePublishingDestinationRequest$ClientToken": "

The idempotency token for the request.

", "CreateThreatIntelSetRequest$ClientToken": "

The idempotency token for the create request.

" } }, @@ -171,7 +177,7 @@ } }, "Country": { - "base": "

Contains information about the country.

", + "base": "

Contains information about the country in which the remote IP address is located.

", "refs": { "RemoteIpDetails$Country": "

Country code of the remote IP address.

" } @@ -216,6 +222,16 @@ "refs": { } }, + "CreatePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "CreatePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "CreateSampleFindingsRequest": { "base": null, "refs": { @@ -302,6 +318,16 @@ "refs": { } }, + "DeletePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeletePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "DeleteThreatIntelSetRequest": { "base": null, "refs": { @@ -312,6 +338,44 @@ "refs": { } }, + "DescribePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "DescribePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, + "Destination": { + "base": "

Contains information about a publishing destination, including the ID, type, and status.

", + "refs": { + "Destinations$member": null + } + }, + "DestinationProperties": { + "base": "

Contains the ARN of the resource to publish to, such as an S3 bucket, and the ARN of the KMS key to use to encrypt published findings.

", + "refs": { + "CreatePublishingDestinationRequest$DestinationProperties": "

Properties of the publishing destination, including the ARNs for the destination and the KMS key used for encryption.

", + "DescribePublishingDestinationResponse$DestinationProperties": "

A DestinationProperties object that includes the DestinationArn and KmsKeyArn of the publishing destination.

", + "UpdatePublishingDestinationRequest$DestinationProperties": "

A DestinationProperties object that includes the DestinationArn and KmsKeyArn of the publishing destination.

" + } + }, + "DestinationType": { + "base": null, + "refs": { + "CreatePublishingDestinationRequest$DestinationType": "

The type of resource for the publishing destination. Currently only S3 is supported.
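A minimal Go sketch of creating an S3 publishing destination follows; the DestinationTypeS3 constant is an assumed generator-derived name, and the detector ID, bucket ARN, and key ARN are placeholders. As noted above, the bucket and key must already exist.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/guardduty"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := guardduty.New(cfg)

	// Export findings to an existing S3 bucket, encrypted with an existing KMS key.
	req := svc.CreatePublishingDestinationRequest(&guardduty.CreatePublishingDestinationInput{
		DetectorId:      aws.String("12abc34d567e8fa901bc2d34e56789f0"), // placeholder detector ID
		DestinationType: guardduty.DestinationTypeS3,                    // assumed generated constant for "S3"
		DestinationProperties: &guardduty.DestinationProperties{
			DestinationArn: aws.String("arn:aws:s3:::example-findings-bucket"),               // placeholder
			KmsKeyArn:      aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key"), // placeholder
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created publishing destination:", *resp.DestinationId)
}
```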

", + "DescribePublishingDestinationResponse$DestinationType": "

The type of the publishing destination. Currently, only S3 is supported.

", + "Destination$DestinationType": "

The type of resource used for the publishing destination. Currently, only S3 is supported.

" + } + }, + "Destinations": { + "base": null, + "refs": { + "ListPublishingDestinationsResponse$Destinations": "

A Destinations object that includes information about each publishing destination returned.

" + } + }, "DetectorId": { "base": null, "refs": { @@ -321,13 +385,16 @@ "CreateFilterRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account for which you want to create a filter.

", "CreateIPSetRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.

", "CreateMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account with which you want to associate member accounts.

", + "CreatePublishingDestinationRequest$DetectorId": "

The ID of the GuardDuty detector associated with the publishing destination.

", "CreateSampleFindingsRequest$DetectorId": "

The ID of the detector to create sample findings for.

", "CreateThreatIntelSetRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account for which you want to create a threatIntelSet.

", "DeleteDetectorRequest$DetectorId": "

The unique ID of the detector that you want to delete.

", "DeleteFilterRequest$DetectorId": "

The unique ID of the detector the filter is associated with.

", - "DeleteIPSetRequest$DetectorId": "

The unique ID of the detector the ipSet is associated with.

", + "DeleteIPSetRequest$DetectorId": "

The unique ID of the detector associated with the IPSet.

", "DeleteMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account whose members you want to delete.

", + "DeletePublishingDestinationRequest$DetectorId": "

The unique ID of the detector associated with the publishing destination to delete.

", "DeleteThreatIntelSetRequest$DetectorId": "

The unique ID of the detector the threatIntelSet is associated with.

", + "DescribePublishingDestinationRequest$DetectorId": "

The unique ID of the detector associated with the publishing destination to retrieve.

", "DetectorIds$member": null, "DisassociateFromMasterAccountRequest$DetectorId": "

The unique ID of the detector of the GuardDuty member account.

", "DisassociateMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account whose members you want to disassociate from master.

", @@ -344,16 +411,18 @@ "ListFindingsRequest$DetectorId": "

The ID of the detector that specifies the GuardDuty service whose findings you want to list.

", "ListIPSetsRequest$DetectorId": "

The unique ID of the detector the ipSet is associated with.

", "ListMembersRequest$DetectorId": "

The unique ID of the detector the member is associated with.

", + "ListPublishingDestinationsRequest$DetectorId": "

The ID of the detector to retrieve publishing destinations for.

", "ListThreatIntelSetsRequest$DetectorId": "

The unique ID of the detector the threatIntelSet is associated with.

", "Member$DetectorId": "

Member account's detector ID.

", "Service$DetectorId": "

Detector ID for the GuardDuty service.

", - "StartMonitoringMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account whom you want to re-enable to monitor members' findings.

", + "StartMonitoringMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty master account associated with the member accounts to monitor.

", "StopMonitoringMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty account that you want to stop from monitor members' findings.

", - "UnarchiveFindingsRequest$DetectorId": "

The ID of the detector that specifies the GuardDuty service whose findings you want to unarchive.

", - "UpdateDetectorRequest$DetectorId": "

The unique ID of the detector that you want to update.

", + "UnarchiveFindingsRequest$DetectorId": "

The ID of the detector associated with the findings to unarchive.

", + "UpdateDetectorRequest$DetectorId": "

The unique ID of the detector to update.

", "UpdateFilterRequest$DetectorId": "

The unique ID of the detector that specifies the GuardDuty service where you want to update a filter.

", - "UpdateFindingsFeedbackRequest$DetectorId": "

The ID of the detector that specifies the GuardDuty service whose findings you want to mark as useful or not useful.

", + "UpdateFindingsFeedbackRequest$DetectorId": "

The ID of the detector associated with the findings to update feedback for.

", "UpdateIPSetRequest$DetectorId": "

The detectorID that specifies the GuardDuty service whose IPSet you want to update.

", + "UpdatePublishingDestinationRequest$DetectorId": "

The ID of the detector associated with the publishing destination to update.

", "UpdateThreatIntelSetRequest$DetectorId": "

The detectorID that specifies the GuardDuty service whose ThreatIntelSet you want to update.

" } }, @@ -390,7 +459,7 @@ } }, "DnsRequestAction": { - "base": "

Contains information about the DNS request.

", + "base": "

Contains information about the DNS_REQUEST action described in this finding.

", "refs": { "Action$DnsRequestAction": "

Information about the DNS_REQUEST action described in this finding.

" } @@ -420,7 +489,7 @@ "Eq": { "base": null, "refs": { - "Condition$Eq": "

Deprecated. Represents the equal condition to be applied to a single field when querying for findings.

" + "Condition$Eq": "

Represents the equal condition to be applied to a single field when querying for findings.

" } }, "Equals": { @@ -438,7 +507,7 @@ "Feedback": { "base": null, "refs": { - "UpdateFindingsFeedbackRequest$Feedback": "

Valid values: USEFUL | NOT_USEFUL

" + "UpdateFindingsFeedbackRequest$Feedback": "

The feedback for the finding.

" } }, "FilterAction": { @@ -482,18 +551,18 @@ } }, "Finding": { - "base": "

Contains information about the finding.

", + "base": "

Contains information about the finding, which is generated when abnormal or suspicious activity is detected.

", "refs": { "Findings$member": null } }, "FindingCriteria": { - "base": "

Contains finding criteria information.

", + "base": "

Contains information about the criteria used for querying findings.

", "refs": { "CreateFilterRequest$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

", "GetFilterResponse$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

", "GetFindingsStatisticsRequest$FindingCriteria": "

Represents the criteria used for querying findings.

", - "ListFindingsRequest$FindingCriteria": "

Represents the criteria used for querying findings.

", + "ListFindingsRequest$FindingCriteria": "

Represents the criteria used for querying findings. Valid values include:

", "UpdateFilterRequest$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

" } }, @@ -509,7 +578,7 @@ "ArchiveFindingsRequest$FindingIds": "

IDs of the findings that you want to archive.

", "GetFindingsRequest$FindingIds": "

IDs of the findings that you want to retrieve.

", "ListFindingsResponse$FindingIds": "

The IDs of the findings you are listing.

", - "UnarchiveFindingsRequest$FindingIds": "

IDs of the findings that you want to unarchive.

", + "UnarchiveFindingsRequest$FindingIds": "

IDs of the findings to unarchive.

", "UpdateFindingsFeedbackRequest$FindingIds": "

IDs of the findings that you want to mark as useful or not useful.

" } }, @@ -518,7 +587,7 @@ "refs": { "CreateDetectorRequest$FindingPublishingFrequency": "

An enum value that specifies how frequently the customer gets published finding updates.

", "GetDetectorResponse$FindingPublishingFrequency": "

Finding publishing frequency.

", - "UpdateDetectorRequest$FindingPublishingFrequency": "

A enum value that specifies how frequently customer got Finding updates published.

" + "UpdateDetectorRequest$FindingPublishingFrequency": "

An enum value that specifies how frequently findings are exported, such as to CloudWatch Events.

" } }, "FindingStatisticType": { @@ -549,7 +618,7 @@ "FindingTypes": { "base": null, "refs": { - "CreateSampleFindingsRequest$FindingTypes": "

Types of sample findings that you want to generate.

" + "CreateSampleFindingsRequest$FindingTypes": "

Types of sample findings to generate.

" } }, "Findings": { @@ -559,7 +628,7 @@ } }, "GeoLocation": { - "base": "

Contains information about the

", + "base": "

Contains information about the location of the remote IP address.

", "refs": { "RemoteIpDetails$GeoLocation": "

Location information of the remote IP address.

" } @@ -658,12 +727,12 @@ "base": null, "refs": { "ListTagsForResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for the given GuardDuty resource

", - "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for the given GuardDuty resource

", - "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for the given GuardDuty resource

" + "TagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for the GuardDuty resource to apply a tag to.

", + "UntagResourceRequest$ResourceArn": "

The Amazon Resource Name (ARN) for the resource to remove tags from.

" } }, "IamInstanceProfile": { - "base": "

Contains information about the instance profile.

", + "base": "

Contains information about the EC2 instance profile.

", "refs": { "InstanceDetails$IamInstanceProfile": "

The profile information of the EC2 instance.

" } @@ -677,10 +746,10 @@ "Integer": { "base": null, "refs": { - "Condition$Gt": "

Deprecated. Represents a greater than condition to be applied to a single field when querying for findings.

", - "Condition$Gte": "

Deprecated. Represents a greater than equal condition to be applied to a single field when querying for findings.

", - "Condition$Lt": "

Deprecated. Represents a less than condition to be applied to a single field when querying for findings.

", - "Condition$Lte": "

Deprecated. Represents a less than equal condition to be applied to a single field when querying for findings.

", + "Condition$Gt": "

Represents a greater than condition to be applied to a single field when querying for findings.

", + "Condition$Gte": "

Represents a greater than or equal condition to be applied to a single field when querying for findings.

", + "Condition$Lt": "

Represents a less than condition to be applied to a single field when querying for findings.

", + "Condition$Lte": "

Represents a less than or equal condition to be applied to a single field when querying for findings.

", "CountBySeverity$value": null, "GetInvitationsCountResponse$InvitationsCount": "

The number of received invitations.

", "LocalPortDetails$Port": "

Port number of the local connection.

", @@ -694,7 +763,7 @@ } }, "Invitation": { - "base": "

Contains information about the invitation.

", + "base": "

Contains information about the invitation to become a member account.

", "refs": { "Invitations$member": null } @@ -800,6 +869,16 @@ "refs": { } }, + "ListPublishingDestinationsRequest": { + "base": null, + "refs": { + } + }, + "ListPublishingDestinationsResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -844,7 +923,8 @@ "Condition$GreaterThan": "

Represents a greater than condition to be applied to a single field when querying for findings.

", "Condition$GreaterThanOrEqual": "

Represents a greater than or equal condition to be applied to a single field when querying for findings.

", "Condition$LessThan": "

Represents a less than condition to be applied to a single field when querying for findings.

", - "Condition$LessThanOrEqual": "

Represents a less than equal condition to be applied to a single field when querying for findings.

" + "Condition$LessThanOrEqual": "

Represents a less than or equal condition to be applied to a single field when querying for findings.

", + "DescribePublishingDestinationResponse$PublishingFailureStartTimestamp": "

The time, in epoch millisecond format, at which GuardDuty was first unable to publish findings to the destination.

" } }, "Master": { @@ -862,6 +942,7 @@ "ListIPSetsRequest$MaxResults": "

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", "ListInvitationsRequest$MaxResults": "

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", "ListMembersRequest$MaxResults": "

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

", + "ListPublishingDestinationsRequest$MaxResults": "

The maximum number of results to return in the response.

", "ListThreatIntelSetsRequest$MaxResults": "

You can use this parameter to indicate the maximum number of items you want in the response. The default value is 50. The maximum value is 50.

" } }, @@ -883,7 +964,7 @@ "refs": { "CreateIPSetRequest$Name": "

The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.

", "CreateThreatIntelSetRequest$Name": "

A user-friendly ThreatIntelSet name that is displayed in all findings generated by activity that involves IP addresses included in this ThreatIntelSet.

", - "GetIPSetResponse$Name": "

The user friendly name to identify the IPSet. This name is displayed in all findings that are triggered by activity that involves IP addresses included in this IPSet.

", + "GetIPSetResponse$Name": "

The user friendly name for the IPSet.

", "GetThreatIntelSetResponse$Name": "

A user-friendly ThreatIntelSet name that is displayed in all findings generated by activity that involves IP addresses included in this ThreatIntelSet.

", "UpdateIPSetRequest$Name": "

The unique ID that specifies the IPSet that you want to update.

", "UpdateThreatIntelSetRequest$Name": "

The unique ID that specifies the ThreatIntelSet that you want to update.

" @@ -892,17 +973,17 @@ "Neq": { "base": null, "refs": { - "Condition$Neq": "

Deprecated. Represents the not equal condition to be applied to a single field when querying for findings.

" + "Condition$Neq": "

Represents the not equal condition to be applied to a single field when querying for findings.

" } }, "NetworkConnectionAction": { - "base": "

Contains information about the network connection.

", + "base": "

Contains information about the NETWORK_CONNECTION action described in the finding.

", "refs": { "Action$NetworkConnectionAction": "

Information about the NETWORK_CONNECTION action described in this finding.

" } }, "NetworkInterface": { - "base": "

Contains information about the network interface.

", + "base": "

Contains information about the network interface of the EC2 instance.

", "refs": { "NetworkInterfaces$member": null } @@ -926,13 +1007,13 @@ } }, "Organization": { - "base": "

Continas information about the organization.

", + "base": "

Contains information about the ISP organization of the remote IP address.

", "refs": { "RemoteIpDetails$Organization": "

ISP Organization information of the remote IP address.

" } }, "PortProbeAction": { - "base": "

Contains information about the port probe.

", + "base": "

Contains information about the PORT_PROBE action described in the finding.

", "refs": { "Action$PortProbeAction": "

Information about the PORT_PROBE action described in this finding.

" } @@ -950,7 +1031,7 @@ } }, "PrivateIpAddressDetails": { - "base": "

Contains information about the private IP address.

", + "base": "

Contains other private IP address information of the EC2 instance.

", "refs": { "PrivateIpAddresses$member": null } @@ -962,7 +1043,7 @@ } }, "ProductCode": { - "base": "

Contains information about the product code.

", + "base": "

Contains information about the product code for the EC2 instance.

", "refs": { "ProductCodes$member": null } @@ -973,8 +1054,15 @@ "InstanceDetails$ProductCodes": "

The product code of the EC2 instance.

" } }, + "PublishingStatus": { + "base": null, + "refs": { + "DescribePublishingDestinationResponse$Status": "

The status of the publishing destination.

", + "Destination$Status": "

The status of the publishing destination.

" + } + }, "RemoteIpDetails": { - "base": "

Continas information about the remote IP address.

", + "base": "

Contains information about the remote IP address of the connection.

", "refs": { "AwsApiCallAction$RemoteIpDetails": "

Remote IP information of the connection.

", "NetworkConnectionAction$RemoteIpDetails": "

Remote IP information of the connection.

", @@ -988,13 +1076,13 @@ } }, "Resource": { - "base": "

Contains information about the resource.

", + "base": "

Contains information about the AWS resource associated with the activity that prompted GuardDuty to generate a finding.

", "refs": { "Finding$Resource": null } }, "SecurityGroup": { - "base": "

Contains information about the security group.

", + "base": "

Contains information about the security groups associated with the EC2 instance.

", "refs": { "SecurityGroups$member": null } @@ -1006,13 +1094,13 @@ } }, "Service": { - "base": "

Contains information about the service.

", + "base": "

Contains additional information about the generated finding.

", "refs": { "Finding$Service": null } }, "SortCriteria": { - "base": "

Contains information about the criteria for sorting.

", + "base": "

Contains information about the criteria used for sorting findings.

", "refs": { "GetFindingsRequest$SortCriteria": "

Represents the criteria used for sorting findings.

", "ListFindingsRequest$SortCriteria": "

Represents the criteria used for sorting findings.

" @@ -1058,12 +1146,19 @@ "Country$CountryCode": "

Country code of the remote IP address.

", "Country$CountryName": "

Country name of the remote IP address.

", "CreateIPSetResponse$IpSetId": "

The ID of the IPSet resource.

", + "CreatePublishingDestinationResponse$DestinationId": "

The ID of the publishing destination created.

", "CreateThreatIntelSetResponse$ThreatIntelSetId": "

The ID of the ThreatIntelSet resource.

", "Criterion$key": null, "DeleteFilterRequest$FilterName": "

The name of the filter you want to delete.

", - "DeleteIPSetRequest$IpSetId": "

The unique ID of the ipSet you want to delete.

", + "DeleteIPSetRequest$IpSetId": "

The unique ID of the IPSet to delete.

", + "DeletePublishingDestinationRequest$DestinationId": "

The ID of the publishing destination to delete.

", "DeleteThreatIntelSetRequest$ThreatIntelSetId": "

The unique ID of the threatIntelSet you want to delete.

", - "DnsRequestAction$Domain": "

Domain information for the DNS request.

", + "DescribePublishingDestinationRequest$DestinationId": "

The ID of the publishing destination to retrieve.

", + "DescribePublishingDestinationResponse$DestinationId": "

The ID of the publishing destination.

", + "Destination$DestinationId": "

The unique ID of the publishing destination.

", + "DestinationProperties$DestinationArn": "

The ARN of the resource to publish to.

", + "DestinationProperties$KmsKeyArn": "

The ARN of the KMS key to use for encryption.

", + "DnsRequestAction$Domain": "

Domain information for the DNS request.

", "DomainDetails$Domain": "

Domain information for the AWS API call.

", "Eq$member": null, "Equals$member": null, @@ -1081,7 +1176,7 @@ "GetDetectorResponse$ServiceRole": "

The GuardDuty service role.

", "GetDetectorResponse$UpdatedAt": "

Detector last update timestamp.

", "GetFilterRequest$FilterName": "

The name of the filter you want to get.

", - "GetIPSetRequest$IpSetId": "

The unique ID of the ipSet you want to get.

", + "GetIPSetRequest$IpSetId": "

The unique ID of the IPSet to retrieve.

", "GetThreatIntelSetRequest$ThreatIntelSetId": "

The unique ID of the threatIntelSet you want to get.

", "IamInstanceProfile$Arn": "

AWS EC2 instance profile ARN.

", "IamInstanceProfile$Id": "

AWS EC2 instance profile ID.

", @@ -1095,9 +1190,9 @@ "InstanceDetails$Platform": "

The platform of the EC2 instance.

", "InternalServerErrorException$Message": "

The error message.

", "InternalServerErrorException$Type": "

The error type.

", - "Invitation$InvitationId": "

This value is used to validate the inviter account to the member account.

", + "Invitation$InvitationId": "

The ID of the invitation. This value is used to validate the inviter account to the member account.

", "Invitation$RelationshipStatus": "

The status of the relationship between the inviter and invitee accounts.

", - "Invitation$InvitedAt": "

Timestamp at which the invitation was sent

", + "Invitation$InvitedAt": "

Timestamp at which the invitation was sent.

", "InviteMembersRequest$Message": "

The invitation message that you want to send to the accounts that you’re inviting to GuardDuty as members.

", "IpSetIds$member": null, "Ipv6Addresses$member": null, @@ -1114,7 +1209,9 @@ "ListMembersRequest$NextToken": "

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "ListMembersRequest$OnlyAssociated": "

Specifies whether to only return associated members or to return all members (including members which haven't been invited yet or have been disassociated).

", "ListMembersResponse$NextToken": "

Pagination parameter to be used on the next list operation to retrieve more items.

", - "ListThreatIntelSetsRequest$NextToken": "

You can use this parameter when paginating results. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", + "ListPublishingDestinationsRequest$NextToken": "

A token to use for paginating results returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", + "ListPublishingDestinationsResponse$NextToken": "

A token to use for paginating results returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

", + "ListThreatIntelSetsRequest$NextToken": "

You can use this parameter to paginate results in the response. Set the value of this parameter to null on your first call to the list action. For subsequent calls to the action, fill nextToken in the request with the value of NextToken from the previous response to continue listing data.

", "ListThreatIntelSetsResponse$NextToken": "

Pagination parameter to be used on the next list operation to retrieve more items.

", "LocalPortDetails$PortName": "

Port name of the local connection.

", "Master$InvitationId": "

This value is used to validate the master account to the member account.

", @@ -1163,11 +1260,12 @@ "UpdateFilterRequest$FilterName": "

The name of the filter.

", "UpdateFindingsFeedbackRequest$Comments": "

Additional feedback about the GuardDuty findings.

", "UpdateIPSetRequest$IpSetId": "

The unique ID that specifies the IPSet that you want to update.

", + "UpdatePublishingDestinationRequest$DestinationId": "

The ID of the publishing destination to update.

", "UpdateThreatIntelSetRequest$ThreatIntelSetId": "

The unique ID that specifies the ThreatIntelSet that you want to update.

" } }, "Tag": { - "base": "

Contains information about the tag associated with the resource.

", + "base": "

Contains information about a tag associated with the EC2 instance.

", "refs": { "Tags$member": null } @@ -1182,7 +1280,7 @@ "TagKeyList": { "base": null, "refs": { - "UntagResourceRequest$TagKeys": "

The tag keys to remove from a resource.

" + "UntagResourceRequest$TagKeys": "

The tag keys to remove from the resource.

" } }, "TagMap": { @@ -1339,6 +1437,16 @@ "refs": { } }, + "UpdatePublishingDestinationRequest": { + "base": null, + "refs": { + } + }, + "UpdatePublishingDestinationResponse": { + "base": null, + "refs": { + } + }, "UpdateThreatIntelSetRequest": { "base": null, "refs": { diff --git a/models/apis/guardduty/2017-11-28/paginators-1.json b/models/apis/guardduty/2017-11-28/paginators-1.json index 1e70a2ccc09..717e540366d 100644 --- a/models/apis/guardduty/2017-11-28/paginators-1.json +++ b/models/apis/guardduty/2017-11-28/paginators-1.json @@ -36,6 +36,11 @@ "limit_key": "MaxResults", "result_key": "Members" }, + "ListPublishingDestinations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListThreatIntelSets": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/iam/2010-05-08/api-2.json b/models/apis/iam/2010-05-08/api-2.json index a1def8d9103..05eeccc5444 100644 --- a/models/apis/iam/2010-05-08/api-2.json +++ b/models/apis/iam/2010-05-08/api-2.json @@ -4588,7 +4588,8 @@ "Description":{"shape":"roleDescriptionType"}, "MaxSessionDuration":{"shape":"roleMaxSessionDurationType"}, "PermissionsBoundary":{"shape":"AttachedPermissionsBoundary"}, - "Tags":{"shape":"tagListType"} + "Tags":{"shape":"tagListType"}, + "RoleLastUsed":{"shape":"RoleLastUsed"} } }, "RoleDetail":{ @@ -4604,7 +4605,15 @@ "RolePolicyList":{"shape":"policyDetailListType"}, "AttachedManagedPolicies":{"shape":"attachedPoliciesListType"}, "PermissionsBoundary":{"shape":"AttachedPermissionsBoundary"}, - "Tags":{"shape":"tagListType"} + "Tags":{"shape":"tagListType"}, + "RoleLastUsed":{"shape":"RoleLastUsed"} + } + }, + "RoleLastUsed":{ + "type":"structure", + "members":{ + "LastUsedDate":{"shape":"dateType"}, + "Region":{"shape":"stringType"} } }, "RoleUsageListType":{ diff --git a/models/apis/iam/2010-05-08/docs-2.json b/models/apis/iam/2010-05-08/docs-2.json index e6f5ac271bc..6bbc6cad38c 100644 --- a/models/apis/iam/2010-05-08/docs-2.json +++ b/models/apis/iam/2010-05-08/docs-2.json @@ -234,7 +234,7 @@ } }, "AttachedPolicy": { - "base": "

Contains information about an attached policy.

An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about an attached policy.

An attached policy is a managed policy that has been attached to a user, group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "attachedPoliciesListType$member": null } @@ -1328,7 +1328,7 @@ } }, "ManagedPolicyDetail": { - "base": "

Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

For more information about managed policies, see Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a managed policy, including the policy's ARN, versions, and the number of principal entities (users, groups, and roles) that the policy is attached to.

This data type is used as a response element in the GetAccountAuthorizationDetails operation.

For more information about managed policies, see Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "ManagedPolicyDetailListType$member": null } @@ -1387,7 +1387,7 @@ } }, "Policy": { - "base": "

Contains information about a managed policy.

This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a managed policy.

This data type is used as a response element in the CreatePolicy, GetPolicy, and ListPolicies operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "CreatePolicyResponse$Policy": "

A structure containing details about the new policy.

", "GetPolicyResponse$Policy": "

A structure containing details about the policy.

", @@ -1420,7 +1420,7 @@ } }, "PolicyGroup": { - "base": "

Contains information about a group that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a group that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "PolicyGroupListType$member": null } @@ -1443,7 +1443,7 @@ } }, "PolicyRole": { - "base": "

Contains information about a role that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a role that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "PolicyRoleListType$member": null } @@ -1468,7 +1468,7 @@ } }, "PolicyUser": { - "base": "

Contains information about a user that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a user that a managed policy is attached to.

This data type is used as a response element in the ListEntitiesForPolicy operation.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "PolicyUserListType$member": null } @@ -1480,7 +1480,7 @@ } }, "PolicyVersion": { - "base": "

Contains information about a version of a managed policy.

This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the Using IAM guide.

", + "base": "

Contains information about a version of a managed policy.

This data type is used as a response element in the CreatePolicyVersion, GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails operations.

For more information about managed policies, refer to Managed Policies and Inline Policies in the IAM User Guide.

", "refs": { "CreatePolicyVersionResponse$PolicyVersion": "

A structure containing details about the new policy version.

", "GetPolicyVersionResponse$PolicyVersion": "

A structure containing details about the policy version.

", @@ -1644,6 +1644,13 @@ "roleDetailListType$member": null } }, + "RoleLastUsed": { + "base": "

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails operations.

", + "refs": { + "Role$RoleLastUsed": "

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

", + "RoleDetail$RoleLastUsed": "

Contains information about the last time that an IAM role was used. This includes the date and time and the Region in which the role was last used. Activity is only reported for the trailing 400 days. This period can be shorter if your Region began supporting these features within the last year. The role might have been used more than 400 days ago. For more information, see Regions Where Data Is Tracked in the IAM User Guide.

" + } + }, "RoleUsageListType": { "base": null, "refs": { @@ -2048,9 +2055,9 @@ "GetPolicyRequest$PolicyArn": "

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "GetPolicyVersionRequest$PolicyArn": "

The Amazon Resource Name (ARN) of the managed policy that you want information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "GetSAMLProviderRequest$SAMLProviderArn": "

The Amazon Resource Name (ARN) of the SAML provider resource object in IAM to get information about.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", - "Group$Arn": "

The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

", + "Group$Arn": "

The Amazon Resource Name (ARN) specifying the group. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

", "GroupDetail$Arn": null, - "InstanceProfile$Arn": "

The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

", + "InstanceProfile$Arn": "

The Amazon Resource Name (ARN) specifying the instance profile. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

", "ListEntitiesForPolicyRequest$PolicyArn": "

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "ListPoliciesGrantingServiceAccessRequest$Arn": "

The ARN of the IAM identity (user, group, or role) whose policies you want to list.

", "ListPolicyVersionsRequest$PolicyArn": "

The Amazon Resource Name (ARN) of the IAM policy for which you want the versions.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", @@ -2064,14 +2071,14 @@ "Role$Arn": "

The Amazon Resource Name (ARN) specifying the role. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

", "RoleDetail$Arn": null, "SAMLProviderListEntry$Arn": "

The Amazon Resource Name (ARN) of the SAML provider.

", - "ServerCertificateMetadata$Arn": "

The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the Using IAM guide.

", + "ServerCertificateMetadata$Arn": "

The Amazon Resource Name (ARN) specifying the server certificate. For more information about ARNs and how to use them in policies, see IAM Identifiers in the IAM User Guide.

", "ServiceLastAccessed$LastAuthenticatedEntity": "

The ARN of the authenticated entity (user or role) that last attempted to access the service. AWS does not report unauthenticated requests.

This field is null if no IAM entities attempted to access the service within the reporting period.

", "SetDefaultPolicyVersionRequest$PolicyArn": "

The Amazon Resource Name (ARN) of the IAM policy whose default version you want to set.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "SimulatePrincipalPolicyRequest$PolicySourceArn": "

The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "UpdateOpenIDConnectProviderThumbprintRequest$OpenIDConnectProviderArn": "

The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for which you want to update the thumbprint. You can get a list of OIDC provider ARNs by using the ListOpenIDConnectProviders operation.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "UpdateSAMLProviderRequest$SAMLProviderArn": "

The Amazon Resource Name (ARN) of the SAML provider to update.

For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference.

", "UpdateSAMLProviderResponse$SAMLProviderArn": "

The Amazon Resource Name (ARN) of the SAML provider that was updated.

", - "User$Arn": "

The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the Using IAM guide.

", + "User$Arn": "

The Amazon Resource Name (ARN) that identifies the user. For more information about ARNs and how to use ARNs in policies, see IAM Identifiers in the IAM User Guide.

", "UserDetail$Arn": null } }, @@ -2272,6 +2279,7 @@ "PolicyVersion$CreateDate": "

The date and time, in ISO 8601 date-time format, when the policy version was created.

", "Role$CreateDate": "

The date and time, in ISO 8601 date-time format, when the role was created.

", "RoleDetail$CreateDate": "

The date and time, in ISO 8601 date-time format, when the role was created.

", + "RoleLastUsed$LastUsedDate": "

The date and time, in ISO 8601 date-time format, when the role was last used.

This field is null if the role has not been used within the IAM tracking period. For more information about the tracking period, see Regions Where Data Is Tracked in the IAM User Guide.

", "SAMLProviderListEntry$ValidUntil": "

The expiration date and time for the SAML provider.

", "SAMLProviderListEntry$CreateDate": "

The date and time when the SAML provider was created.

", "SSHPublicKey$UploadDate": "

The date and time, in ISO 8601 date-time format, when the SSH public key was uploaded.

", @@ -2283,7 +2291,7 @@ "ServiceSpecificCredentialMetadata$CreateDate": "

The date and time, in ISO 8601 date-time format, when the service-specific credential was created.

", "SigningCertificate$UploadDate": "

The date when the signing certificate was uploaded.

", "User$CreateDate": "

The date and time, in ISO 8601 date-time format, when the user was created.

", - "User$PasswordLastUsed": "

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the Using IAM guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

A null value does not mean that the user never had a password. Also, if the user does not currently have a password, but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

", + "User$PasswordLastUsed": "

The date and time, in ISO 8601 date-time format, when the user's password was last used to sign in to an AWS website. For a list of AWS websites that capture a user's last sign-in time, see the Credential Reports topic in the IAM User Guide. If a password is used more than once in a five-minute span, only the first use is returned in this field. If the field is null (no value), then it indicates that they never signed in with a password. This can be because:

A null value does not mean that the user never had a password. Also, if the user does not currently have a password but had one in the past, then this field contains the date and time the most recent password was used.

This value is returned only in the GetUser and ListUsers operations.

", "UserDetail$CreateDate": "

The date and time, in ISO 8601 date-time format, when the user was created.

", "VirtualMFADevice$EnableDate": "

The date and time on which the virtual MFA device was enabled.

" } @@ -2428,19 +2436,19 @@ "base": null, "refs": { "EntityInfo$Id": "

The identifier of the entity (user or role).

", - "Group$GroupId": "

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "GroupDetail$GroupId": "

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "InstanceProfile$InstanceProfileId": "

The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "ManagedPolicyDetail$PolicyId": "

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "Policy$PolicyId": "

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the Using IAM guide.

", + "Group$GroupId": "

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "GroupDetail$GroupId": "

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "InstanceProfile$InstanceProfileId": "

The stable and unique string identifying the instance profile. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "ManagedPolicyDetail$PolicyId": "

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "Policy$PolicyId": "

The stable and unique string identifying the policy.

For more information about IDs, see IAM Identifiers in the IAM User Guide.

", "PolicyGroup$GroupId": "

The stable and unique string identifying the group. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", "PolicyRole$RoleId": "

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", "PolicyUser$UserId": "

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", - "Role$RoleId": "

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "RoleDetail$RoleId": "

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "ServerCertificateMetadata$ServerCertificateId": "

The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "User$UserId": "

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

", - "UserDetail$UserId": "

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the Using IAM guide.

" + "Role$RoleId": "

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "RoleDetail$RoleId": "

The stable and unique string identifying the role. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "ServerCertificateMetadata$ServerCertificateId": "

The stable and unique string identifying the server certificate. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "User$UserId": "

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

", + "UserDetail$UserId": "

The stable and unique string identifying the user. For more information about IDs, see IAM Identifiers in the IAM User Guide.

" } }, "instanceProfileListType": { @@ -2702,20 +2710,20 @@ "CreateRoleRequest$Path": "

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "CreateUserRequest$Path": "

The path for the user name. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "CreateVirtualMFADeviceRequest$Path": "

The path for the virtual MFA device. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/).

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", - "EntityInfo$Path": "

The path to the entity (user or role). For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "Group$Path": "

The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "GroupDetail$Path": "

The path to the group. For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "InstanceProfile$Path": "

The path to the instance profile. For more information about paths, see IAM Identifiers in the Using IAM guide.

", + "EntityInfo$Path": "

The path to the entity (user or role). For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "Group$Path": "

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "GroupDetail$Path": "

The path to the group. For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "InstanceProfile$Path": "

The path to the instance profile. For more information about paths, see IAM Identifiers in the IAM User Guide.

", "ListEntitiesForPolicyRequest$PathPrefix": "

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all entities.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", - "Role$Path": "

The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "RoleDetail$Path": "

The path to the role. For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "ServerCertificateMetadata$Path": "

The path to the server certificate. For more information about paths, see IAM Identifiers in the Using IAM guide.

", + "Role$Path": "

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "RoleDetail$Path": "

The path to the role. For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "ServerCertificateMetadata$Path": "

The path to the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

", "UpdateGroupRequest$NewPath": "

New path for the IAM group. Only include this if changing the group's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "UpdateServerCertificateRequest$NewPath": "

The new path for the server certificate. Include this only if you are updating the server certificate's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "UpdateUserRequest$NewPath": "

New path for the IAM user. Include this parameter only if you're changing the user's path.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "UploadServerCertificateRequest$Path": "

The path for the server certificate. For more information about paths, see IAM Identifiers in the IAM User Guide.

This parameter is optional. If it is not included, it defaults to a slash (/). This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

If you are uploading a server certificate specifically for use with Amazon CloudFront distributions, you must specify a path using the path parameter. The path must begin with /cloudfront and must include a trailing slash (for example, /cloudfront/test/).

", - "User$Path": "

The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "UserDetail$Path": "

The path to the user. For more information about paths, see IAM Identifiers in the Using IAM guide.

" + "User$Path": "

The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "UserDetail$Path": "

The path to the user. For more information about paths, see IAM Identifiers in the IAM User Guide.

" } }, "policyDescriptionType": { @@ -2834,8 +2842,8 @@ "ListAttachedRolePoliciesRequest$PathPrefix": "

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "ListAttachedUserPoliciesRequest$PathPrefix": "

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies.

This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", "ListPoliciesRequest$PathPrefix": "

The path prefix for filtering the results. This parameter is optional. If it is not included, it defaults to a slash (/), listing all policies. This parameter allows (through its regex pattern) a string of characters consisting of either a forward slash (/) by itself or a string that must begin and end with forward slashes. In addition, it can contain any ASCII character from the ! (\\u0021) through the DEL character (\\u007F), including most punctuation characters, digits, and upper and lowercased letters.

", - "ManagedPolicyDetail$Path": "

The path to the policy.

For more information about paths, see IAM Identifiers in the Using IAM guide.

", - "Policy$Path": "

The path to the policy.

For more information about paths, see IAM Identifiers in the Using IAM guide.

" + "ManagedPolicyDetail$Path": "

The path to the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

", + "Policy$Path": "

The path to the policy.

For more information about paths, see IAM Identifiers in the IAM User Guide.

" } }, "policyScopeType": { @@ -2855,7 +2863,7 @@ "refs": { "DeletePolicyVersionRequest$VersionId": "

The policy version to delete.

This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

", "GetPolicyVersionRequest$VersionId": "

Identifies the policy version to retrieve.

This parameter allows (through its regex pattern) a string of characters that consists of the lowercase letter 'v' followed by one or two digits, and optionally followed by a period '.' and a string of letters and digits.

", - "ManagedPolicyDetail$DefaultVersionId": "

The identifier for the version of the policy that is set as the default (operative) version.

For more information about policy versions, see Versioning for Managed Policies in the Using IAM guide.

", + "ManagedPolicyDetail$DefaultVersionId": "

The identifier for the version of the policy that is set as the default (operative) version.

For more information about policy versions, see Versioning for Managed Policies in the IAM User Guide.

", "Policy$DefaultVersionId": "

The identifier for the version of the policy that is set as the default version.

", "PolicyVersion$VersionId": "

The identifier for the policy version.

Policy version identifiers always begin with v (always lowercase). When a policy is created, the first policy version is v1.

", "SetDefaultPolicyVersionRequest$VersionId": "

The version of the policy to set as the default (operative) version.

For more information about managed policy versions, see Versioning for Managed Policies in the IAM User Guide.

" @@ -3117,7 +3125,8 @@ "AccessKeyLastUsed$ServiceName": "

The name of the AWS service with which this access key was most recently used. The value of this field is \"N/A\" in the following situations:

", "AccessKeyLastUsed$Region": "

The AWS Region where this access key was most recently used. The value for this field is \"N/A\" in the following situations:

For more information about AWS Regions, see Regions and Endpoints in the Amazon Web Services General Reference.

", "ErrorDetails$Message": "

Detailed information about the reason that the operation failed.

", - "ErrorDetails$Code": "

The error code associated with the operation failure.

" + "ErrorDetails$Code": "

The error code associated with the operation failure.

", + "RoleLastUsed$Region": "

The name of the AWS Region in which the role was last used.

" } }, "summaryKeyType": { @@ -3182,7 +3191,7 @@ } }, "thumbprintType": { - "base": "

Contains a thumbprint for an identity provider's server certificate.

The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. It is always a 40-character string.

", + "base": "

Contains a thumbprint for an identity provider's server certificate.

The identity provider's server certificate thumbprint is the hex-encoded SHA-1 hash value of the self-signed X.509 certificate used by the domain where the OpenID Connect provider makes its keys available. The thumbprint is always a 40-character string.

", "refs": { "thumbprintListType$member": null } diff --git a/models/apis/iam/2010-05-08/examples-1.json b/models/apis/iam/2010-05-08/examples-1.json index 928dc2132a0..cd3a94aa608 100644 --- a/models/apis/iam/2010-05-08/examples-1.json +++ b/models/apis/iam/2010-05-08/examples-1.json @@ -733,8 +733,13 @@ "Arn": "arn:aws:iam::123456789012:role/Test-Role", "AssumeRolePolicyDocument": "", "CreateDate": "2013-04-18T05:01:58Z", + "MaxSessionDuration": 3600, "Path": "/", "RoleId": "AROADBQP57FF2AEXAMPLE", + "RoleLastUsed": { + "LastUsedDate": "2019-11-18T05:01:58Z", + "Region": "us-east-1" + }, "RoleName": "Test-Role" } }, diff --git a/models/apis/iot/2015-05-28/api-2.json b/models/apis/iot/2015-05-28/api-2.json index c17b72b790b..99a2a3735c1 100644 --- a/models/apis/iot/2015-05-28/api-2.json +++ b/models/apis/iot/2015-05-28/api-2.json @@ -238,6 +238,22 @@ {"shape":"InternalFailureException"} ] }, + "ConfirmTopicRuleDestination":{ + "name":"ConfirmTopicRuleDestination", + "http":{ + "method":"GET", + "requestUri":"/confirmdestination/{confirmationToken+}" + }, + "input":{"shape":"ConfirmTopicRuleDestinationRequest"}, + "output":{"shape":"ConfirmTopicRuleDestinationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ConflictingResourceUpdateException"} + ] + }, "CreateAuthorizer":{ "name":"CreateAuthorizer", "http":{ @@ -544,6 +560,22 @@ {"shape":"ConflictingResourceUpdateException"} ] }, + "CreateTopicRuleDestination":{ + "name":"CreateTopicRuleDestination", + "http":{ + "method":"POST", + "requestUri":"/destinations" + }, + "input":{"shape":"CreateTopicRuleDestinationRequest"}, + "output":{"shape":"CreateTopicRuleDestinationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"ConflictingResourceUpdateException"} + ] + }, "DeleteAccountAuditConfiguration":{ "name":"DeleteAccountAuditConfiguration", "http":{ @@ -887,6 +919,22 @@ {"shape":"ConflictingResourceUpdateException"} ] }, + "DeleteTopicRuleDestination":{ + "name":"DeleteTopicRuleDestination", + "http":{ + "method":"DELETE", + "requestUri":"/destinations/{arn+}" + }, + "input":{"shape":"DeleteTopicRuleDestinationRequest"}, + "output":{"shape":"DeleteTopicRuleDestinationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ConflictingResourceUpdateException"} + ] + }, "DeleteV2LoggingLevel":{ "name":"DeleteV2LoggingLevel", "http":{ @@ -1372,6 +1420,26 @@ {"shape":"ConflictingResourceUpdateException"} ] }, + "GetCardinality":{ + "name":"GetCardinality", + "http":{ + "method":"POST", + "requestUri":"/indices/cardinality" + }, + "input":{"shape":"GetCardinalityRequest"}, + "output":{"shape":"GetCardinalityResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidQueryException"}, + {"shape":"InvalidAggregationException"}, + {"shape":"IndexNotReadyException"} + ] + }, "GetEffectivePolicies":{ "name":"GetEffectivePolicies", "http":{ @@ -1452,6 +1520,26 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetPercentiles":{ 
+ "name":"GetPercentiles", + "http":{ + "method":"POST", + "requestUri":"/indices/percentiles" + }, + "input":{"shape":"GetPercentilesRequest"}, + "output":{"shape":"GetPercentilesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidQueryException"}, + {"shape":"InvalidAggregationException"}, + {"shape":"IndexNotReadyException"} + ] + }, "GetPolicy":{ "name":"GetPolicy", "http":{ @@ -1537,6 +1625,21 @@ {"shape":"UnauthorizedException"} ] }, + "GetTopicRuleDestination":{ + "name":"GetTopicRuleDestination", + "http":{ + "method":"GET", + "requestUri":"/destinations/{arn+}" + }, + "input":{"shape":"GetTopicRuleDestinationRequest"}, + "output":{"shape":"GetTopicRuleDestinationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedException"} + ] + }, "GetV2LoggingOptions":{ "name":"GetV2LoggingOptions", "http":{ @@ -2171,6 +2274,21 @@ {"shape":"ResourceNotFoundException"} ] }, + "ListTopicRuleDestinations":{ + "name":"ListTopicRuleDestinations", + "http":{ + "method":"GET", + "requestUri":"/destinations" + }, + "input":{"shape":"ListTopicRuleDestinationsRequest"}, + "output":{"shape":"ListTopicRuleDestinationsResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedException"} + ] + }, "ListTopicRules":{ "name":"ListTopicRules", "http":{ @@ -2847,6 +2965,22 @@ {"shape":"ResourceNotFoundException"} ] }, + "UpdateTopicRuleDestination":{ + "name":"UpdateTopicRuleDestination", + "http":{ + "method":"PATCH", + "requestUri":"/destinations" + }, + "input":{"shape":"UpdateTopicRuleDestinationRequest"}, + "output":{"shape":"UpdateTopicRuleDestinationResponse"}, + "errors":[ + {"shape":"InternalException"}, + {"shape":"InvalidRequestException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ConflictingResourceUpdateException"} + ] + }, "ValidateSecurityProfileBehaviors":{ "name":"ValidateSecurityProfileBehaviors", "http":{ @@ -2932,7 +3066,8 @@ "salesforce":{"shape":"SalesforceAction"}, "iotAnalytics":{"shape":"IotAnalyticsAction"}, "iotEvents":{"shape":"IotEventsAction"}, - "stepFunctions":{"shape":"StepFunctionsAction"} + "stepFunctions":{"shape":"StepFunctionsAction"}, + "http":{"shape":"HttpAction"} } }, "ActionList":{ @@ -3468,6 +3603,7 @@ "DISABLE" ] }, + "Average":{"type":"double"}, "AwsAccountId":{ "type":"string", "max":12, @@ -3904,6 +4040,27 @@ "Enabled":{"shape":"Enabled"} } }, + "ConfirmTopicRuleDestinationRequest":{ + "type":"structure", + "required":["confirmationToken"], + "members":{ + "confirmationToken":{ + "shape":"ConfirmationToken", + "location":"uri", + "locationName":"confirmationToken" + } + } + }, + "ConfirmTopicRuleDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "ConfirmationToken":{ + "type":"string", + "max":2048, + "min":1 + }, "ConflictingResourceUpdateException":{ "type":"structure", "members":{ @@ -4350,6 +4507,19 @@ "thingTypeId":{"shape":"ThingTypeId"} } }, + "CreateTopicRuleDestinationRequest":{ + "type":"structure", + "required":["destinationConfiguration"], + "members":{ + "destinationConfiguration":{"shape":"TopicRuleDestinationConfiguration"} + } + }, + 
"CreateTopicRuleDestinationResponse":{ + "type":"structure", + "members":{ + "topicRuleDestination":{"shape":"TopicRuleDestination"} + } + }, "CreateTopicRuleRequest":{ "type":"structure", "required":[ @@ -4781,6 +4951,22 @@ "members":{ } }, + "DeleteTopicRuleDestinationRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"AwsArn", + "location":"uri", + "locationName":"arn" + } + } + }, + "DeleteTopicRuleDestinationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteTopicRuleRequest":{ "type":"structure", "required":["ruleName"], @@ -5601,6 +5787,26 @@ "FailedChecksCount":{"type":"integer"}, "FailedFindingsCount":{"type":"long"}, "FailedThings":{"type":"integer"}, + "Field":{ + "type":"structure", + "members":{ + "name":{"shape":"FieldName"}, + "type":{"shape":"FieldType"} + } + }, + "FieldName":{"type":"string"}, + "FieldType":{ + "type":"string", + "enum":[ + "Number", + "String", + "Boolean" + ] + }, + "Fields":{ + "type":"list", + "member":{"shape":"Field"} + }, "FileId":{ "type":"integer", "max":255, @@ -5648,12 +5854,23 @@ "ForceFlag":{"type":"boolean"}, "Forced":{"type":"boolean"}, "FunctionArn":{"type":"string"}, - "GEMaxResults":{ - "type":"integer", - "max":10000, - "min":1 - }, "GenerationId":{"type":"string"}, + "GetCardinalityRequest":{ + "type":"structure", + "required":["queryString"], + "members":{ + "indexName":{"shape":"IndexName"}, + "queryString":{"shape":"QueryString"}, + "aggregationField":{"shape":"AggregationField"}, + "queryVersion":{"shape":"QueryVersion"} + } + }, + "GetCardinalityResponse":{ + "type":"structure", + "members":{ + "cardinality":{"shape":"Count"} + } + }, "GetEffectivePoliciesRequest":{ "type":"structure", "members":{ @@ -5730,6 +5947,23 @@ "otaUpdateInfo":{"shape":"OTAUpdateInfo"} } }, + "GetPercentilesRequest":{ + "type":"structure", + "required":["queryString"], + "members":{ + "indexName":{"shape":"IndexName"}, + "queryString":{"shape":"QueryString"}, + "aggregationField":{"shape":"AggregationField"}, + "queryVersion":{"shape":"QueryVersion"}, + "percents":{"shape":"PercentList"} + } + }, + "GetPercentilesResponse":{ + "type":"structure", + "members":{ + "percentiles":{"shape":"Percentiles"} + } + }, "GetPolicyRequest":{ "type":"structure", "required":["policyName"], @@ -5812,6 +6046,23 @@ "statistics":{"shape":"Statistics"} } }, + "GetTopicRuleDestinationRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"AwsArn", + "location":"uri", + "locationName":"arn" + } + } + }, + "GetTopicRuleDestinationResponse":{ + "type":"structure", + "members":{ + "topicRuleDestination":{"shape":"TopicRuleDestination"} + } + }, "GetTopicRuleRequest":{ "type":"structure", "required":["ruleName"], @@ -5853,6 +6104,64 @@ "HashAlgorithm":{"type":"string"}, "HashKeyField":{"type":"string"}, "HashKeyValue":{"type":"string"}, + "HeaderKey":{ + "type":"string", + "max":256, + "min":1 + }, + "HeaderList":{ + "type":"list", + "member":{"shape":"HttpActionHeader"}, + "max":100, + "min":0 + }, + "HeaderValue":{"type":"string"}, + "HttpAction":{ + "type":"structure", + "required":["url"], + "members":{ + "url":{"shape":"Url"}, + "confirmationUrl":{"shape":"Url"}, + "headers":{"shape":"HeaderList"}, + "auth":{"shape":"HttpAuthorization"} + } + }, + "HttpActionHeader":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{"shape":"HeaderKey"}, + "value":{"shape":"HeaderValue"} + } + }, + "HttpAuthorization":{ + "type":"structure", + "members":{ + 
"sigv4":{"shape":"SigV4Authorization"} + } + }, + "HttpUrlDestinationConfiguration":{ + "type":"structure", + "required":["confirmationUrl"], + "members":{ + "confirmationUrl":{"shape":"Url"} + } + }, + "HttpUrlDestinationProperties":{ + "type":"structure", + "members":{ + "confirmationUrl":{"shape":"Url"} + } + }, + "HttpUrlDestinationSummary":{ + "type":"structure", + "members":{ + "confirmationUrl":{"shape":"Url"} + } + }, "ImplicitDeny":{ "type":"structure", "members":{ @@ -7402,6 +7711,28 @@ "nextToken":{"shape":"NextToken"} } }, + "ListTopicRuleDestinationsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"TopicRuleDestinationMaxResults", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListTopicRuleDestinationsResponse":{ + "type":"structure", + "members":{ + "destinationSummaries":{"shape":"TopicRuleDestinationSummaries"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListTopicRulesRequest":{ "type":"structure", "members":{ @@ -7411,7 +7742,7 @@ "locationName":"topic" }, "maxResults":{ - "shape":"GEMaxResults", + "shape":"TopicRuleMaxResults", "location":"querystring", "locationName":"maxResults" }, @@ -7573,6 +7904,7 @@ "max":250, "min":1 }, + "Maximum":{"type":"double"}, "MaximumPerMinute":{ "type":"integer", "max":1000, @@ -7601,6 +7933,7 @@ "ports":{"shape":"Ports"} } }, + "Minimum":{"type":"double"}, "MinimumNumberOfExecutedThings":{ "type":"integer", "min":1 @@ -7793,11 +8126,32 @@ }, "PartitionKey":{"type":"string"}, "PayloadField":{"type":"string"}, + "Percent":{ + "type":"double", + "max":100, + "min":0 + }, + "PercentList":{ + "type":"list", + "member":{"shape":"Percent"} + }, + "PercentPair":{ + "type":"structure", + "members":{ + "percent":{"shape":"Percent"}, + "value":{"shape":"PercentValue"} + } + }, + "PercentValue":{"type":"double"}, "Percentage":{ "type":"integer", "max":100, "min":0 }, + "Percentiles":{ + "type":"list", + "member":{"shape":"PercentPair"} + }, "Platform":{"type":"string"}, "Policies":{ "type":"list", @@ -8425,6 +8779,7 @@ "type":"list", "member":{"shape":"SecurityProfileTarget"} }, + "ServiceName":{"type":"string"}, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -8497,6 +8852,19 @@ "disableAllLogs":{"shape":"DisableAllLogs"} } }, + "SigV4Authorization":{ + "type":"structure", + "required":[ + "signingRegion", + "serviceName", + "roleArn" + ], + "members":{ + "signingRegion":{"shape":"SigningRegion"}, + "serviceName":{"shape":"ServiceName"}, + "roleArn":{"shape":"AwsArn"} + } + }, "Signature":{"type":"blob"}, "SignatureAlgorithm":{"type":"string"}, "SigningJobId":{"type":"string"}, @@ -8509,6 +8877,7 @@ "certificatePathOnDevice":{"shape":"CertificatePathOnDevice"} } }, + "SigningRegion":{"type":"string"}, "SkippedFindingsCount":{"type":"long"}, "SkyfallMaxResults":{ "type":"integer", @@ -8633,7 +9002,35 @@ "Statistics":{ "type":"structure", "members":{ - "count":{"shape":"Count"} + "count":{"shape":"Count"}, + "average":{ + "shape":"Average", + "box":true + }, + "sum":{ + "shape":"Sum", + "box":true + }, + "minimum":{ + "shape":"Minimum", + "box":true + }, + "maximum":{ + "shape":"Maximum", + "box":true + }, + "sumOfSquares":{ + "shape":"SumOfSquares", + "box":true + }, + "variance":{ + "shape":"Variance", + "box":true + }, + "stdDeviation":{ + "shape":"StdDeviation", + "box":true + } } }, "Status":{ @@ -8646,6 +9043,7 @@ "Cancelling" ] }, + "StdDeviation":{"type":"double"}, 
"StepFunctionsAction":{ "type":"structure", "required":[ @@ -8746,6 +9144,8 @@ }, "SucceededFindingsCount":{"type":"long"}, "SucceededThings":{"type":"integer"}, + "Sum":{"type":"double"}, + "SumOfSquares":{"type":"double"}, "TableName":{"type":"string"}, "Tag":{ "type":"structure", @@ -8961,7 +9361,9 @@ "type":"structure", "required":["thingGroupIndexingMode"], "members":{ - "thingGroupIndexingMode":{"shape":"ThingGroupIndexingMode"} + "thingGroupIndexingMode":{"shape":"ThingGroupIndexingMode"}, + "managedFields":{"shape":"Fields"}, + "customFields":{"shape":"Fields"} } }, "ThingGroupIndexingMode":{ @@ -9016,7 +9418,9 @@ "required":["thingIndexingMode"], "members":{ "thingIndexingMode":{"shape":"ThingIndexingMode"}, - "thingConnectivityIndexingMode":{"shape":"ThingConnectivityIndexingMode"} + "thingConnectivityIndexingMode":{"shape":"ThingConnectivityIndexingMode"}, + "managedFields":{"shape":"Fields"}, + "customFields":{"shape":"Fields"} } }, "ThingIndexingMode":{ @@ -9126,6 +9530,48 @@ "errorAction":{"shape":"Action"} } }, + "TopicRuleDestination":{ + "type":"structure", + "members":{ + "arn":{"shape":"AwsArn"}, + "status":{"shape":"TopicRuleDestinationStatus"}, + "statusReason":{"shape":"String"}, + "httpUrlProperties":{"shape":"HttpUrlDestinationProperties"} + } + }, + "TopicRuleDestinationConfiguration":{ + "type":"structure", + "members":{ + "httpUrlConfiguration":{"shape":"HttpUrlDestinationConfiguration"} + } + }, + "TopicRuleDestinationMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, + "TopicRuleDestinationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "IN_PROGRESS", + "DISABLED", + "ERROR" + ] + }, + "TopicRuleDestinationSummaries":{ + "type":"list", + "member":{"shape":"TopicRuleDestinationSummary"} + }, + "TopicRuleDestinationSummary":{ + "type":"structure", + "members":{ + "arn":{"shape":"AwsArn"}, + "status":{"shape":"TopicRuleDestinationStatus"}, + "statusReason":{"shape":"String"}, + "httpUrlSummary":{"shape":"HttpUrlDestinationSummary"} + } + }, "TopicRuleList":{ "type":"list", "member":{"shape":"TopicRuleListItem"} @@ -9140,6 +9586,11 @@ "ruleDisabled":{"shape":"IsDisabled"} } }, + "TopicRuleMaxResults":{ + "type":"integer", + "max":10000, + "min":1 + }, "TopicRulePayload":{ "type":"structure", "required":[ @@ -9593,6 +10044,26 @@ "members":{ } }, + "UpdateTopicRuleDestinationRequest":{ + "type":"structure", + "required":[ + "arn", + "status" + ], + "members":{ + "arn":{"shape":"AwsArn"}, + "status":{"shape":"TopicRuleDestinationStatus"} + } + }, + "UpdateTopicRuleDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "Url":{ + "type":"string", + "max":2000 + }, "UseBase64":{"type":"boolean"}, "Valid":{"type":"boolean"}, "ValidateSecurityProfileBehaviorsRequest":{ @@ -9620,6 +10091,7 @@ "member":{"shape":"ValidationError"} }, "Value":{"type":"string"}, + "Variance":{"type":"double"}, "Version":{"type":"long"}, "VersionConflictException":{ "type":"structure", diff --git a/models/apis/iot/2015-05-28/docs-2.json b/models/apis/iot/2015-05-28/docs-2.json index d47dfbfd978..aec218d8f31 100644 --- a/models/apis/iot/2015-05-28/docs-2.json +++ b/models/apis/iot/2015-05-28/docs-2.json @@ -16,6 +16,7 @@ "CancelJob": "

Cancels a job.

", "CancelJobExecution": "

Cancels the execution of a job for a given thing.

", "ClearDefaultAuthorizer": "

Clears the default authorizer.

", + "ConfirmTopicRuleDestination": "

Confirms a topic rule destination. When you create a rule requiring a destination, AWS IoT sends a confirmation message to the endpoint or base address you specify. The message includes a token which you pass back when calling ConfirmTopicRuleDestination to confirm that you own or have access to the endpoint.
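For context, here is a minimal Go sketch of the confirmation step with the v2 SDK's generated IoT client. It assumes the developer-preview request/Send pattern (iot.New, ConfirmTopicRuleDestinationRequest) and uses a placeholder token value; verify the generated names against the iot package before relying on them.

```go
// Hypothetical sketch: pass the token from the confirmation message back to AWS IoT.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := iot.New(cfg)

	// Placeholder: the confirmation token delivered to your confirmation endpoint.
	token := "example-confirmation-token"

	req := svc.ConfirmTopicRuleDestinationRequest(&iot.ConfirmTopicRuleDestinationInput{
		ConfirmationToken: aws.String(token),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```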

", "CreateAuthorizer": "

Creates an authorizer.

", "CreateBillingGroup": "

Creates a billing group.

", "CreateCertificateFromCsr": "

Creates an X.509 certificate using the specified certificate signing request.

Note: The CSR must include a public key that is either an RSA key with a length of at least 2048 bits or an ECC key from NIST P-256 or NIST P-384 curves.

Note: Reusing the same certificate signing request (CSR) results in a distinct certificate.

You can create multiple certificates in a batch by creating a directory, copying multiple .csr files into that directory, and then specifying that directory on the command line. The following commands show how to create a batch of certificates given a batch of CSRs.

Assuming a set of CSRs are located inside of the directory my-csr-directory:

On Linux and OS X, the command is:

$ ls my-csr-directory/ | xargs -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

This command lists all of the CSRs in my-csr-directory and pipes each CSR file name to the aws iot create-certificate-from-csr AWS CLI command to create a certificate for the corresponding CSR.

The aws iot create-certificate-from-csr part of the command can also be run in parallel to speed up the certificate creation process:

$ ls my-csr-directory/ | xargs -P 10 -I {} aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/{}

On Windows PowerShell, the command to create certificates for all CSRs in my-csr-directory is:

> ls -Name my-csr-directory | %{aws iot create-certificate-from-csr --certificate-signing-request file://my-csr-directory/$_}

On a Windows command prompt, the command to create certificates for all CSRs in my-csr-directory is:

> forfiles /p my-csr-directory /c \"cmd /c aws iot create-certificate-from-csr --certificate-signing-request file://@path\"

", @@ -29,11 +30,12 @@ "CreateRoleAlias": "

Creates a role alias.

", "CreateScheduledAudit": "

Creates a scheduled audit that is run at a specified time interval.

", "CreateSecurityProfile": "

Creates a Device Defender security profile.

", - "CreateStream": "

Creates a stream for delivering one or more large files in chunks over MQTT. A stream transports data bytes in chunks or blocks packaged as MQTT messages from a source like S3. You can have one or more files associated with a stream. The total size of a file associated with the stream cannot exceed more than 2 MB. The stream will be created with version 0. If a stream is created with the same streamID as a stream that existed and was deleted within last 90 days, we will resurrect that old stream by incrementing the version by 1.

", + "CreateStream": "

Creates a stream for delivering one or more large files in chunks over MQTT. A stream transports data bytes in chunks or blocks packaged as MQTT messages from a source like S3. You can have one or more files associated with a stream.

", "CreateThing": "

Creates a thing record in the registry. If this call is made multiple times using the same thing name and configuration, the call will succeed. If this call is made with the same thing name but different configuration a ResourceAlreadyExistsException is thrown.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

", "CreateThingGroup": "

Create a thing group.

This is a control plane operation. See Authorization for information about authorizing control plane actions.

", "CreateThingType": "

Creates a new thing type.

", "CreateTopicRule": "

Creates a rule. Creating rules is an administrator-level action. Any user who has permission to create rules will be able to access data processed by the rule.

", + "CreateTopicRuleDestination": "

Creates a topic rule destination. The destination must be confirmed prior to use.

", "DeleteAccountAuditConfiguration": "

Restores the default settings for Device Defender audits for this account. Any configuration data you entered is deleted and all audit checks are reset to disabled.

", "DeleteAuthorizer": "

Deletes an authorizer.

", "DeleteBillingGroup": "

Deletes the billing group.

", @@ -55,6 +57,7 @@ "DeleteThingGroup": "

Deletes a thing group.

", "DeleteThingType": "

Deletes the specified thing type. You cannot delete a thing type if it has things associated with it. To delete a thing type, first mark it as deprecated by calling DeprecateThingType, then remove any associated things by calling UpdateThing to change the thing type on any associated thing, and finally use DeleteThingType to delete the thing type.

", "DeleteTopicRule": "

Deletes the rule.

", + "DeleteTopicRuleDestination": "

Deletes a topic rule destination.

", "DeleteV2LoggingLevel": "

Deletes a logging level.

", "DeprecateThingType": "

Deprecates a thing type. You cannot associate new things with a deprecated thing type.

", "DescribeAccountAuditConfiguration": "

Gets information about the Device Defender audit settings for this account. Settings include how audit notifications are sent and which audit checks are enabled or disabled.

", @@ -86,16 +89,19 @@ "DetachThingPrincipal": "

Detaches the specified principal from the specified thing. A principal can be an X.509 certificate, an IAM user, group, or role, an Amazon Cognito identity, or a federated identity.

This call is asynchronous. It might take several seconds for the detachment to propagate.

", "DisableTopicRule": "

Disables the rule.

", "EnableTopicRule": "

Enables the rule.

", + "GetCardinality": "

Returns the approximate count of unique values that match the query.

", "GetEffectivePolicies": "

Gets a list of the policies that have an effect on the authorization behavior of the specified device when it connects to the AWS IoT device gateway.

", "GetIndexingConfiguration": "

Gets the search configuration.

", "GetJobDocument": "

Gets a job document.

", "GetLoggingOptions": "

Gets the logging options.

NOTE: use of this command is not recommended. Use GetV2LoggingOptions instead.

", "GetOTAUpdate": "

Gets an OTA update.

", + "GetPercentiles": "

Groups the aggregated values that match the query into percentile groupings. The default percentile groupings are 1, 5, 25, 50, 75, 95, and 99, although you can specify your own when you call GetPercentiles. This function returns a value for each percentile group specified (or the default percentile groupings). The percentile group \"1\" contains the aggregated field value that occurs in approximately one percent of the values that match the query. The percentile group \"5\" contains the aggregated field value that occurs in approximately five percent of the values that match the query, and so on. The result is an approximation; the more values that match the query, the more accurate the percentile values.
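A minimal Go sketch of calling this operation with the v2 SDK follows. It assumes the developer-preview request/Send pattern and the field names implied by the GetPercentilesRequest shape (queryString, aggregationField); the query string and aggregation field are illustrative placeholders.

```go
// Hypothetical sketch: fetch percentile groupings for an indexed numeric field.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := iot.New(cfg)

	// Omitting Percents uses the default groupings 1, 5, 25, 50, 75, 95, 99.
	req := svc.GetPercentilesRequest(&iot.GetPercentilesInput{
		QueryString:      aws.String("thingName:tempSensor*"),           // placeholder fleet indexing query
		AggregationField: aws.String("shadow.reported.temperature"),      // placeholder field
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Percentiles {
		if p.Percent != nil && p.Value != nil {
			fmt.Printf("p%v = %v\n", *p.Percent, *p.Value)
		}
	}
}
```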

", "GetPolicy": "

Gets information about the specified policy with the policy document of the default version.

", "GetPolicyVersion": "

Gets information about the specified policy version.

", "GetRegistrationCode": "

Gets a registration code used to register a CA certificate with AWS IoT.

", "GetStatistics": "

Gets statistics about things that match the specified query.

", "GetTopicRule": "

Gets information about the rule.

", + "GetTopicRuleDestination": "

Gets information about a topic rule destination.

", "GetV2LoggingOptions": "

Gets the fine grained logging options.

", "ListActiveViolations": "

Lists the active violations for a given Device Defender security profile.

", "ListAttachedPolicies": "

Lists the policies attached to the specified thing group.

", @@ -137,6 +143,7 @@ "ListThings": "

Lists your things. Use the attributeName and attributeValue parameters to filter your things. For example, calling ListThings with attributeName=Color and attributeValue=Red retrieves all things in the registry that contain an attribute Color with the value Red.

", "ListThingsInBillingGroup": "

Lists the things you have added to the given billing group.

", "ListThingsInThingGroup": "

Lists the things in the specified group.

", + "ListTopicRuleDestinations": "

Lists all the topic rule destinations in your AWS account.
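Below is a minimal Go sketch of paging through destinations with the maxResults/nextToken pair defined in this update, again assuming the developer-preview request/Send pattern and the generated names implied by the ListTopicRuleDestinations shapes.

```go
// Hypothetical sketch: list every topic rule destination and its status.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := iot.New(cfg)

	var next *string
	for {
		req := svc.ListTopicRuleDestinationsRequest(&iot.ListTopicRuleDestinationsInput{
			MaxResults: aws.Int64(100),
			NextToken:  next,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range resp.DestinationSummaries {
			// ARN is always present in a summary, so dereferencing is safe here.
			fmt.Printf("%s: %v\n", *d.Arn, d.Status)
		}
		if resp.NextToken == nil {
			return // no more pages
		}
		next = resp.NextToken
	}
}
```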

", "ListTopicRules": "

Lists the rules for the specific topic.

", "ListV2LoggingLevels": "

Lists logging levels.

", "ListViolationEvents": "

Lists the Device Defender security profile violations discovered during the given time period. You can use filters to limit the results to those alerts issued for a particular security profile, behavior, or thing (device).

", @@ -179,6 +186,7 @@ "UpdateThing": "

Updates the data for a thing.

", "UpdateThingGroup": "

Update a thing group.

", "UpdateThingGroupsForThing": "

Updates the groups to which the thing belongs.

", + "UpdateTopicRuleDestination": "

Updates a topic rule destination. You use this to change the status, endpoint URL, or confirmation URL of the destination.
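A minimal Go sketch of this call is shown below. It assumes the developer-preview request/Send pattern, an enum constant derived from TopicRuleDestinationStatus, and a placeholder destination ARN.

```go
// Hypothetical sketch: enable a confirmed destination (or set IN_PROGRESS to
// trigger a fresh confirmation challenge).
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := iot.New(cfg)

	// Placeholder: use the ARN returned by CreateTopicRuleDestination.
	destinationArn := "arn:aws:iot:us-west-2:123456789012:..."

	req := svc.UpdateTopicRuleDestinationRequest(&iot.UpdateTopicRuleDestinationInput{
		Arn:    aws.String(destinationArn),
		Status: iot.TopicRuleDestinationStatusEnabled, // assumed enum constant name
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```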

", "ValidateSecurityProfileBehaviors": "

Validates a Device Defender security profile behaviors specification.

" }, "shapes": { @@ -297,6 +305,8 @@ "AggregationField": { "base": null, "refs": { + "GetCardinalityRequest$aggregationField": "

The field to aggregate.

", + "GetPercentilesRequest$aggregationField": "

The field to aggregate.

", "GetStatisticsRequest$aggregationField": "

The aggregation field name. Currently not supported.

" } }, @@ -761,6 +771,12 @@ "UpdateCACertificateRequest$newAutoRegistrationStatus": "

The new value for the auto registration status. Valid values are: \"ENABLE\" or \"DISABLE\".

" } }, + "Average": { + "base": null, + "refs": { + "Statistics$average": "

The average of the aggregated fields. If the field data type is String this value is indeterminate.

" + } + }, "AwsAccountId": { "base": null, "refs": { @@ -778,11 +794,13 @@ "refs": { "CloudwatchAlarmAction$roleArn": "

The IAM role that allows access to the CloudWatch alarm.

", "CloudwatchMetricAction$roleArn": "

The IAM role that allows access to the CloudWatch metric.

", + "DeleteTopicRuleDestinationRequest$arn": "

The ARN of the topic rule destination to delete.

", "DynamoDBAction$roleArn": "

The ARN of the IAM role that grants access to the DynamoDB table.

", "DynamoDBv2Action$roleArn": "

The ARN of the IAM role that grants access to the DynamoDB table.

", "ElasticsearchAction$roleArn": "

The IAM role ARN that has access to Elasticsearch.

", "FirehoseAction$roleArn": "

The IAM role that grants access to the Amazon Kinesis Firehose stream.

", "GetLoggingOptionsResponse$roleArn": "

The ARN of the IAM role that grants access.

", + "GetTopicRuleDestinationRequest$arn": "

The ARN of the topic rule destination.

", "GetV2LoggingOptionsResponse$roleArn": "

The IAM role ARN AWS IoT uses to write to your CloudWatch logs.

", "IotAnalyticsAction$channelArn": "

(deprecated) The ARN of the IoT Analytics channel to which message data will be sent.

", "IotAnalyticsAction$roleArn": "

The ARN of the role which has a policy that grants IoT Analytics permission to send message data via IoT Analytics (iotanalytics:BatchPutMessage).

", @@ -792,10 +810,14 @@ "RepublishAction$roleArn": "

The ARN of the IAM role that grants access.

", "S3Action$roleArn": "

The ARN of the IAM role that grants access.

", "SetV2LoggingOptionsRequest$roleArn": "

The ARN of the role that allows IoT to write to Cloudwatch logs.

", + "SigV4Authorization$roleArn": "

The ARN of the signing role.

", "SnsAction$targetArn": "

The ARN of the SNS topic.

", "SnsAction$roleArn": "

The ARN of the IAM role that grants access.

", "SqsAction$roleArn": "

The ARN of the IAM role that grants access.

", - "StepFunctionsAction$roleArn": "

The ARN of the role that grants IoT permission to start execution of a state machine (\"Action\":\"states:StartExecution\").

" + "StepFunctionsAction$roleArn": "

The ARN of the role that grants IoT permission to start execution of a state machine (\"Action\":\"states:StartExecution\").

", + "TopicRuleDestination$arn": "

The topic rule destination URL.

", + "TopicRuleDestinationSummary$arn": "

The topic rule destination ARN.

", + "UpdateTopicRuleDestinationRequest$arn": "

The ARN of the topic rule destination.

" } }, "AwsIotJobArn": { @@ -1275,6 +1297,22 @@ "EventConfigurations$value": null } }, + "ConfirmTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "ConfirmTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, + "ConfirmationToken": { + "base": null, + "refs": { + "ConfirmTopicRuleDestinationRequest$confirmationToken": "

The token used to confirm ownership or access to the topic rule confirmation URL.

" + } + }, "ConflictingResourceUpdateException": { "base": "

A conflicting resource update exception. This exception is thrown when two pending updates cause a conflict.

", "refs": { @@ -1303,6 +1341,7 @@ "refs": { "DescribeThingRegistrationTaskResponse$successCount": "

The number of things successfully provisioned.

", "DescribeThingRegistrationTaskResponse$failureCount": "

The number of things that failed to be provisioned.

", + "GetCardinalityResponse$cardinality": "

The approximate count of unique values that match the query.

", "Statistics$count": "

The count of things that match the query.

" } }, @@ -1476,6 +1515,16 @@ "refs": { } }, + "CreateTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "CreateTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "CreateTopicRuleRequest": { "base": "

The input for the CreateTopicRule operation.

", "refs": { @@ -1791,6 +1840,16 @@ "refs": { } }, + "DeleteTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "DeleteTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "DeleteTopicRuleRequest": { "base": "

The input for the DeleteTopicRule operation.

", "refs": { @@ -2356,6 +2415,33 @@ "JobProcessDetails$numberOfFailedThings": "

The number of things that failed executing the job.

" } }, + "Field": { + "base": "

The field to aggregate.

", + "refs": { + "Fields$member": null + } + }, + "FieldName": { + "base": null, + "refs": { + "Field$name": "

The name of the field.

" + } + }, + "FieldType": { + "base": null, + "refs": { + "Field$type": "

The data type of the field.

" + } + }, + "Fields": { + "base": null, + "refs": { + "ThingGroupIndexingConfiguration$managedFields": "

A list of automatically indexed thing group fields.

", + "ThingGroupIndexingConfiguration$customFields": "

A list of thing group fields to index. This list cannot contain any managed fields. Use the GetIndexingConfiguration API to get a list of managed fields.

", + "ThingIndexingConfiguration$managedFields": "

A list of automatically indexed thing fields.

", + "ThingIndexingConfiguration$customFields": "

A list of thing fields to index. This list cannot contain any managed fields. Use the GetIndexingConfiguration API to get a list of managed fields.

" + } + }, "FileId": { "base": null, "refs": { @@ -2444,12 +2530,6 @@ "LambdaAction$functionArn": "

The ARN of the Lambda function.

" } }, - "GEMaxResults": { - "base": null, - "refs": { - "ListTopicRulesRequest$maxResults": "

The maximum number of results to return.

" - } - }, "GenerationId": { "base": null, "refs": { @@ -2459,6 +2539,16 @@ "GetPolicyVersionResponse$generationId": "

The generation ID of the policy version.

" } }, + "GetCardinalityRequest": { + "base": null, + "refs": { + } + }, + "GetCardinalityResponse": { + "base": null, + "refs": { + } + }, "GetEffectivePoliciesRequest": { "base": null, "refs": { @@ -2509,6 +2599,16 @@ "refs": { } }, + "GetPercentilesRequest": { + "base": null, + "refs": { + } + }, + "GetPercentilesResponse": { + "base": null, + "refs": { + } + }, "GetPolicyRequest": { "base": "

The input for the GetPolicy operation.

", "refs": { @@ -2549,6 +2649,16 @@ "refs": { } }, + "GetTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "GetTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, "GetTopicRuleRequest": { "base": "

The input for the GetTopicRule operation.

", "refs": { @@ -2594,6 +2704,60 @@ "DynamoDBAction$hashKeyValue": "

The hash key value.

" } }, + "HeaderKey": { + "base": null, + "refs": { + "HttpActionHeader$key": "

The HTTP header key.

" + } + }, + "HeaderList": { + "base": null, + "refs": { + "HttpAction$headers": "

The HTTP headers to send with the message data.

" + } + }, + "HeaderValue": { + "base": null, + "refs": { + "HttpActionHeader$value": "

The HTTP header value. Substitution templates are supported.

" + } + }, + "HttpAction": { + "base": "

Send data to an HTTPS endpoint.

", + "refs": { + "Action$http": "

Send data to an HTTPS endpoint.

" + } + }, + "HttpActionHeader": { + "base": "

The HTTP action header.

", + "refs": { + "HeaderList$member": null + } + }, + "HttpAuthorization": { + "base": "

The authorization method used to send messages.

", + "refs": { + "HttpAction$auth": "

The authentication method to use when sending data to an HTTPS endpoint.

" + } + }, + "HttpUrlDestinationConfiguration": { + "base": "

HTTP URL destination configuration used by the topic rule's HTTP action.

", + "refs": { + "TopicRuleDestinationConfiguration$httpUrlConfiguration": "

Configuration of the HTTP URL.

" + } + }, + "HttpUrlDestinationProperties": { + "base": "

HTTP URL destination properties.

", + "refs": { + "TopicRuleDestination$httpUrlProperties": "

Properties of the HTTP URL.

" + } + }, + "HttpUrlDestinationSummary": { + "base": "

Information about an HTTP URL destination.

", + "refs": { + "TopicRuleDestinationSummary$httpUrlSummary": "

Information about the HTTP URL.

" + } + }, "ImplicitDeny": { "base": "

Information that implicitly denies authorization. When a policy doesn't explicitly deny or allow an action on a resource, it is considered an implicit deny.

", "refs": { @@ -2632,6 +2796,8 @@ "DescribeIndexRequest$indexName": "

The index name.

", "DescribeIndexResponse$indexName": "

The index name.

", "DescribeThingGroupResponse$indexName": "

The dynamic thing group index name.

", + "GetCardinalityRequest$indexName": "

The name of the index to search.

", + "GetPercentilesRequest$indexName": "

The name of the index to search.

", "GetStatisticsRequest$indexName": "

The name of the index to search. The default value is AWS_Things.

", "IndexNamesList$member": null, "SearchIndexRequest$indexName": "

The search index name.

", @@ -3369,6 +3535,16 @@ "refs": { } }, + "ListTopicRuleDestinationsRequest": { + "base": null, + "refs": { + } + }, + "ListTopicRuleDestinationsResponse": { + "base": null, + "refs": { + } + }, "ListTopicRulesRequest": { "base": "

The input for the ListTopicRules operation.

", "refs": { @@ -3507,6 +3683,12 @@ "ListViolationEventsRequest$maxResults": "

The maximum number of results to return at one time.

" } }, + "Maximum": { + "base": null, + "refs": { + "Statistics$maximum": "

The maximum value of the aggregated fields. If the field data type is String this value is indeterminate.

" + } + }, "MaximumPerMinute": { "base": null, "refs": { @@ -3543,6 +3725,12 @@ "ViolationEvent$metricValue": "

The value of the metric (the measurement).

" } }, + "Minimum": { + "base": null, + "refs": { + "Statistics$minimum": "

The minimum value of the aggregated fields. If the field data type is String this value is indeterminate.

" + } + }, "MinimumNumberOfExecutedThings": { "base": null, "refs": { @@ -3697,6 +3885,8 @@ "ListThingsInThingGroupResponse$nextToken": "

The token used to get the next set of results, or null if there are no additional results.

", "ListThingsRequest$nextToken": "

The token to retrieve the next set of results.

", "ListThingsResponse$nextToken": "

The token used to get the next set of results, or null if there are no additional results.

", + "ListTopicRuleDestinationsRequest$nextToken": "

The token to retrieve the next set of results.

", + "ListTopicRuleDestinationsResponse$nextToken": "

The token to retrieve the next set of results.

", "ListTopicRulesRequest$nextToken": "

A token used to retrieve the next value.

", "ListTopicRulesResponse$nextToken": "

A token used to retrieve the next value.

", "ListV2LoggingLevelsRequest$nextToken": "

The token used to get the next set of results, or null if there are no additional results.

", @@ -3889,12 +4079,43 @@ "DynamoDBAction$payloadField": "

The action payload. This name can be customized.

" } }, + "Percent": { + "base": null, + "refs": { + "PercentList$member": null, + "PercentPair$percent": "

The percentile.

" + } + }, + "PercentList": { + "base": null, + "refs": { + "GetPercentilesRequest$percents": "

The percentile groups returned.

" + } + }, + "PercentPair": { + "base": "

Describes the percentile and percentile value.

", + "refs": { + "Percentiles$member": null + } + }, + "PercentValue": { + "base": null, + "refs": { + "PercentPair$value": "

The value of the percentile.

" + } + }, "Percentage": { "base": null, "refs": { "DescribeThingRegistrationTaskResponse$percentageProgress": "

The progress of the bulk provisioning task expressed as a percentage.

" } }, + "Percentiles": { + "base": null, + "refs": { + "GetPercentilesResponse$percentiles": "

The percentile values of the aggregated fields.

" + } + }, "Platform": { "base": null, "refs": { @@ -4064,7 +4285,7 @@ "base": null, "refs": { "AttachPrincipalPolicyRequest$principal": "

The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.

", - "AttachThingPrincipalRequest$principal": "

The principal, such as a certificate or other credential.

", + "AttachThingPrincipalRequest$principal": "

The principal, which can be a certificate ARN (as returned from the CreateCertificate operation) or an Amazon Cognito ID.

", "DetachPrincipalPolicyRequest$principal": "

The principal.

If the principal is a certificate, specify the certificate ARN. If the principal is an Amazon Cognito identity, specify the identity ID.

", "DetachThingPrincipalRequest$principal": "

If the principal is a certificate, this value must be ARN of the certificate. If the principal is an Amazon Cognito identity, this value must be the ID of the Amazon Cognito identity.

", "GetEffectivePoliciesRequest$principal": "

The principal.

", @@ -4139,7 +4360,7 @@ "Qos": { "base": null, "refs": { - "RepublishAction$qos": "

The Quality of Service (QoS) level to use when republishing messages.

" + "RepublishAction$qos": "

The Quality of Service (QoS) level to use when republishing messages. The default value is 0.

" } }, "QueryMaxResults": { @@ -4155,6 +4376,8 @@ "CreateDynamicThingGroupRequest$queryString": "

The dynamic thing group search query string.

See Query Syntax for information about query string syntax.

", "CreateDynamicThingGroupResponse$queryString": "

The dynamic thing group search query string.

", "DescribeThingGroupResponse$queryString": "

The dynamic thing group search query string.

", + "GetCardinalityRequest$queryString": "

The search query.

", + "GetPercentilesRequest$queryString": "

The query string.

", "GetStatisticsRequest$queryString": "

The query used to search. You can specify \"*\" for the query string to get the count of all indexed things in your AWS account.

", "SearchIndexRequest$queryString": "

The search query string.

", "UpdateDynamicThingGroupRequest$queryString": "

The dynamic thing group search query string to update.

" @@ -4166,6 +4389,8 @@ "CreateDynamicThingGroupRequest$queryVersion": "

The dynamic thing group query version.

Currently one query version is supported: \"2017-09-30\". If not specified, the query version defaults to this value.

", "CreateDynamicThingGroupResponse$queryVersion": "

The dynamic thing group query version.

", "DescribeThingGroupResponse$queryVersion": "

The dynamic thing group query version.

", + "GetCardinalityRequest$queryVersion": "

The query version.

", + "GetPercentilesRequest$queryVersion": "

The query version.

", "GetStatisticsRequest$queryVersion": "

The version of the query used to search.

", "SearchIndexRequest$queryVersion": "

The query version.

", "UpdateDynamicThingGroupRequest$queryVersion": "

The dynamic thing group query version to update.

Currently one query version is supported: \"2017-09-30\". If not specified, the query version defaults to this value.

" @@ -4767,6 +4992,12 @@ "ListTargetsForSecurityProfileResponse$securityProfileTargets": "

The thing groups to which the security profile is attached.

" } }, + "ServiceName": { + "base": null, + "refs": { + "SigV4Authorization$serviceName": "

The service name to use while signing with Sig V4.

" + } + }, "ServiceUnavailableException": { "base": "

The service is temporarily unavailable.

", "refs": { @@ -4823,6 +5054,12 @@ "refs": { } }, + "SigV4Authorization": { + "base": "

Use Sig V4 authorization.

", + "refs": { + "HttpAuthorization$sigv4": "

Use Sig V4 authorization. For more information, see Signature Version 4 Signing Process.

" + } + }, "Signature": { "base": null, "refs": { @@ -4853,6 +5090,12 @@ "StartSigningJobParameter$signingProfileParameter": "

Describes the code-signing profile.

" } }, + "SigningRegion": { + "base": null, + "refs": { + "SigV4Authorization$signingRegion": "

The signing region.

" + } + }, "SkippedFindingsCount": { "base": null, "refs": { @@ -4961,6 +5204,12 @@ "ListThingRegistrationTasksRequest$status": "

The status of the bulk thing provisioning task.

" } }, + "StdDeviation": { + "base": null, + "refs": { + "Statistics$stdDeviation": "

The standard deviation of the aggregated field values.

" + } + }, "StepFunctionsAction": { "base": "

Starts execution of a Step Functions state machine.

", "refs": { @@ -5074,7 +5323,9 @@ "CloudwatchMetricAction$metricTimestamp": "

An optional Unix timestamp.

", "CreateTopicRuleRequest$tags": "

Metadata which can be used to manage the topic rule.

For URI Request parameters use format: ...key1=value1&key2=value2...

For the CLI command-line parameter use format: --tags \"key1=value1&key2=value2...\"

For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"

", "StringMap$key": null, - "StringMap$value": null + "StringMap$value": null, + "TopicRuleDestination$statusReason": "

Additional details or reason why the topic rule destination is in the current status.

", + "TopicRuleDestinationSummary$statusReason": "

The reason the topic rule destination is in the current status.

" } }, "StringMap": { @@ -5096,6 +5347,18 @@ "JobProcessDetails$numberOfSucceededThings": "

The number of things which successfully completed the job.

" } }, + "Sum": { + "base": null, + "refs": { + "Statistics$sum": "

The sum of the aggregated fields. If the field data type is String this value is indeterminate.

" + } + }, + "SumOfSquares": { + "base": null, + "refs": { + "Statistics$sumOfSquares": "

The sum of the squares of the aggregated field values.

" + } + }, "TableName": { "base": null, "refs": { @@ -5647,6 +5910,45 @@ "GetTopicRuleResponse$rule": "

The rule.

" } }, + "TopicRuleDestination": { + "base": "

A topic rule destination.

", + "refs": { + "CreateTopicRuleDestinationResponse$topicRuleDestination": "

The topic rule destination.

", + "GetTopicRuleDestinationResponse$topicRuleDestination": "

The topic rule destination.

" + } + }, + "TopicRuleDestinationConfiguration": { + "base": "

Configuration of the topic rule destination.

", + "refs": { + "CreateTopicRuleDestinationRequest$destinationConfiguration": "

The topic rule destination configuration.

" + } + }, + "TopicRuleDestinationMaxResults": { + "base": null, + "refs": { + "ListTopicRuleDestinationsRequest$maxResults": "

The maximum number of results to return at one time.

" + } + }, + "TopicRuleDestinationStatus": { + "base": null, + "refs": { + "TopicRuleDestination$status": "

The status of the topic rule destination. Valid values are:

IN_PROGRESS

A topic rule destination was created but has not been confirmed. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

ENABLED

Confirmation was completed, and traffic to this destination is allowed. You can set status to DISABLED by calling UpdateTopicRuleDestination.

DISABLED

Confirmation was completed, and traffic to this destination is not allowed. You can set status to ENABLED by calling UpdateTopicRuleDestination.

ERROR

Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination for details about the error. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

", + "TopicRuleDestinationSummary$status": "

The status of the topic rule destination. Valid values are:

IN_PROGRESS

A topic rule destination was created but has not been confirmed. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

ENABLED

Confirmation was completed, and traffic to this destination is allowed. You can set status to DISABLED by calling UpdateTopicRuleDestination.

DISABLED

Confirmation was completed, and traffic to this destination is not allowed. You can set status to ENABLED by calling UpdateTopicRuleDestination.

ERROR

Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination for details about the error. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

", + "UpdateTopicRuleDestinationRequest$status": "

The status of the topic rule destination. Valid values are:

IN_PROGRESS

A topic rule destination was created but has not been confirmed. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

ENABLED

Confirmation was completed, and traffic to this destination is allowed. You can set status to DISABLED by calling UpdateTopicRuleDestination.

DISABLED

Confirmation was completed, and traffic to this destination is not allowed. You can set status to ENABLED by calling UpdateTopicRuleDestination.

ERROR

Confirmation could not be completed, for example if the confirmation timed out. You can call GetTopicRuleDestination for details about the error. You can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling UpdateTopicRuleDestination causes a new confirmation challenge to be sent to your confirmation endpoint.

" + } + }, + "TopicRuleDestinationSummaries": { + "base": null, + "refs": { + "ListTopicRuleDestinationsResponse$destinationSummaries": "

Information about a topic rule destination.

" + } + }, + "TopicRuleDestinationSummary": { + "base": "

Information about the topic rule destination.

", + "refs": { + "TopicRuleDestinationSummaries$member": null + } + }, "TopicRuleList": { "base": null, "refs": { @@ -5659,6 +5961,12 @@ "TopicRuleList$member": null } }, + "TopicRuleMaxResults": { + "base": null, + "refs": { + "ListTopicRulesRequest$maxResults": "

The maximum number of results to return.

" + } + }, "TopicRulePayload": { "base": "

Describes a rule.

", "refs": { @@ -5904,6 +6212,26 @@ "refs": { } }, + "UpdateTopicRuleDestinationRequest": { + "base": null, + "refs": { + } + }, + "UpdateTopicRuleDestinationResponse": { + "base": null, + "refs": { + } + }, + "Url": { + "base": null, + "refs": { + "HttpAction$url": "

The endpoint URL. If substitution templates are used in the URL, you must also specify a confirmationUrl. If this is a new destination, a new TopicRuleDestination is created if possible.

", + "HttpAction$confirmationUrl": "

The URL to which AWS IoT sends a confirmation message. The value of the confirmation URL must be a prefix of the endpoint URL. If you do not specify a confirmation URL, AWS IoT uses the endpoint URL as the confirmation URL. If you use substitution templates in the confirmationUrl, you must create and enable topic rule destinations that match each possible value of the substitution template before traffic is allowed to your endpoint URL.
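To illustrate the prefix relationship, here is a hedged Go sketch of creating a rule with an HTTP action whose confirmationUrl is a prefix of url. The generated type and field names (Action, HttpAction, HttpActionHeader) are assumptions based on the model shapes in this update, and the URLs are placeholders.

```go
// Hypothetical sketch: a topic rule that forwards messages to an HTTPS endpoint.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := iot.New(cfg)

	req := svc.CreateTopicRuleRequest(&iot.CreateTopicRuleInput{
		RuleName: aws.String("ForwardTelemetryToHttps"),
		TopicRulePayload: &iot.TopicRulePayload{
			Sql: aws.String("SELECT * FROM 'telemetry/#'"),
			Actions: []iot.Action{
				{
					Http: &iot.HttpAction{
						// The confirmation URL must be a prefix of the endpoint URL.
						ConfirmationUrl: aws.String("https://example.com/iot"),
						Url:             aws.String("https://example.com/iot/telemetry"),
						Headers: []iot.HttpActionHeader{
							{Key: aws.String("x-source-topic"), Value: aws.String("${topic()}")},
						},
					},
				},
			},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```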

", + "HttpUrlDestinationConfiguration$confirmationUrl": "

The URL AWS IoT uses to confirm ownership of or access to the topic rule destination URL.

", + "HttpUrlDestinationProperties$confirmationUrl": "

The URL used to confirm the HTTP topic rule destination URL.

", + "HttpUrlDestinationSummary$confirmationUrl": "

The URL used to confirm ownership of or access to the HTTP topic rule destination URL.

" + } + }, "UseBase64": { "base": null, "refs": { @@ -5946,6 +6274,12 @@ "Parameters$value": null } }, + "Variance": { + "base": null, + "refs": { + "Statistics$variance": "

The variance of the aggregated field values.

" + } + }, "Version": { "base": null, "refs": { diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 0621a198e42..70ab245b1c6 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -2061,10 +2061,13 @@ "nodejs6.10", "nodejs8.10", "nodejs10.x", + "nodejs12.x", "java8", + "java11", "python2.7", "python3.6", "python3.7", + "python3.8", "dotnetcore1.0", "dotnetcore2.0", "dotnetcore2.1", diff --git a/models/apis/logs/2014-03-28/docs-2.json b/models/apis/logs/2014-03-28/docs-2.json index f05b73045aa..63521292674 100644 --- a/models/apis/logs/2014-03-28/docs-2.json +++ b/models/apis/logs/2014-03-28/docs-2.json @@ -5,7 +5,7 @@ "AssociateKmsKey": "

Associates the specified AWS Key Management Service (AWS KMS) customer master key (CMK) with the specified log group.

Associating an AWS KMS CMK with a log group overrides any existing associations between the log group and a CMK. After a CMK is associated with a log group, all newly ingested data for the log group is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

Note that it can take up to 5 minutes for this operation to take effect.

If you attempt to associate a CMK with a log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

", "CancelExportTask": "

Cancels the specified export task.

The task must be in the PENDING or RUNNING state.

", "CreateExportTask": "

Creates an export task, which allows you to efficiently export data from a log group to an Amazon S3 bucket.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate out log data for each export task, you can specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

Exporting to S3 buckets that are encrypted with AES-256 is supported. Exporting to S3 buckets encrypted with SSE-KMS is not supported.

", - "CreateLogGroup": "

Creates a log group with the specified name.

You can create up to 5000 log groups per account.

You must use the following guidelines when naming a log group:

If you associate a AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

", + "CreateLogGroup": "

Creates a log group with the specified name.

You can create up to 20,000 log groups per account.

You must use the following guidelines when naming a log group:

If you associate an AWS Key Management Service (AWS KMS) customer master key (CMK) with the log group, ingested data is encrypted using the CMK. This association is stored as long as the data encrypted with the CMK is still within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a CMK with the log group but the CMK does not exist or the CMK is disabled, you will receive an InvalidParameterException error.

", "CreateLogStream": "

Creates a log stream for the specified log group.

There is no limit on the number of log streams that you can create for a log group.

You must use the following guidelines when naming a log stream:

", "DeleteDestination": "

Deletes the specified destination, and eventually disables all the subscription filters that publish to it. This operation does not delete the physical resource encapsulated by the destination.

", "DeleteLogGroup": "

Deletes the specified log group and permanently deletes all the archived log events associated with the log group.

", @@ -29,7 +29,7 @@ "GetLogRecord": "

Retrieves all the fields and values of a single log event. All fields are retrieved, even if the original query that produced the logRecordPointer retrieved only a subset of fields. Fields are returned as field name/field value pairs.

Additionally, the entire unparsed log event is returned within @message.

", "GetQueryResults": "

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field, which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start a query execution. To run a query, use StartQuery.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

", "ListTagsLogGroup": "

Lists the tags for the specified log group.

", - "PutDestination": "

Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents. A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose strea, or an AWS Lambda function.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

", + "PutDestination": "

Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.

A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.

Through an access policy, a destination controls what is written to it. By default, PutDestination does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination.

", "PutDestinationPolicy": "

Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.

", "PutLogEvents": "

Uploads a batch of log events to the specified log stream.

You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams. If you call PutLogEvents twice within a narrow time period using the same value for sequenceToken, both calls may be successful, or one may be rejected.

The batch of events must satisfy the following constraints:

If a call to PutLogEvents returns \"UnrecognizedClientException\" the most likely cause is an invalid AWS access key ID or secret key.

", "PutMetricFilter": "

Creates or updates a metric filter and associates it with the specified log group. Metric filters allow you to configure rules to extract metric data from log events ingested through PutLogEvents.

The maximum number of metric filters that can be associated with a log group is 100.

", @@ -314,7 +314,7 @@ "refs": { "FilterLogEventsRequest$limit": "

The maximum number of events to return. The default is 10,000 events.

", "GetLogEventsRequest$limit": "

The maximum number of log events returned. If you don't specify a value, the maximum is as many log events as can fit in a response size of 1 MB, up to 10,000 log events.

", - "StartQueryRequest$limit": "

The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned.

" + "StartQueryRequest$limit": "

The maximum number of log events to return in the query. If the query string uses the fields command, only the specified fields and their values are returned. The default is 1000.

" } }, "ExportDestinationBucket": { @@ -762,7 +762,7 @@ "DescribeSubscriptionFiltersResponse$nextToken": null, "FilterLogEventsRequest$nextToken": "

The token for the next set of events to return. (You received this token from a previous call.)

", "FilterLogEventsResponse$nextToken": "

The token to use when requesting the next set of items. The token expires after 24 hours.

", - "GetLogEventsRequest$nextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", + "GetLogEventsRequest$nextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

Using this token works only when you specify true for startFromHead.
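A minimal Go sketch of forward paging under this constraint follows. It assumes the developer-preview request/Send pattern for the cloudwatchlogs client and uses placeholder log group and stream names; the loop stops when the same forward token is returned, per the nextForwardToken description that follows.

```go
// Hypothetical sketch: page forward through a log stream with startFromHead=true.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := cloudwatchlogs.New(cfg)

	var token *string
	for {
		req := svc.GetLogEventsRequest(&cloudwatchlogs.GetLogEventsInput{
			LogGroupName:  aws.String("/example/app"),      // placeholder group
			LogStreamName: aws.String("example-stream"),    // placeholder stream
			StartFromHead: aws.Bool(true),                  // required for nextToken paging
			NextToken:     token,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range resp.Events {
			if e.Message != nil {
				fmt.Println(*e.Message)
			}
		}
		// Receiving the same token back signals the end of the stream.
		if token != nil && resp.NextForwardToken != nil && *resp.NextForwardToken == *token {
			return
		}
		token = resp.NextForwardToken
	}
}
```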

", "GetLogEventsResponse$nextForwardToken": "

The token for the next set of items in the forward direction. The token expires after 24 hours. If you have reached the end of the stream, it will return the same token you passed in.

", "GetLogEventsResponse$nextBackwardToken": "

The token for the next set of items in the backward direction. The token expires after 24 hours. This token will never be null. If you have reached the end of the stream, it will return the same token you passed in.

" } @@ -1043,7 +1043,7 @@ "base": null, "refs": { "LogGroup$storedBytes": "

The number of bytes stored.

", - "LogStream$storedBytes": "

The number of bytes stored.

IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for log streams, and will be reported as zero. This change applies only to log streams. The storedBytes parameter for log groups is not affected.

" + "LogStream$storedBytes": "

The number of bytes stored.

IMPORTANT: On June 17, 2019, this parameter was deprecated for log streams, and is always reported as zero. This change applies only to log streams. The storedBytes parameter for log groups is not affected.

" } }, "SubscriptionFilter": { diff --git a/models/apis/mediaconvert/2017-08-29/api-2.json b/models/apis/mediaconvert/2017-08-29/api-2.json index 64792eeab41..660be2dddcb 100644 --- a/models/apis/mediaconvert/2017-08-29/api-2.json +++ b/models/apis/mediaconvert/2017-08-29/api-2.json @@ -1230,7 +1230,7 @@ "locationName": "codecSettings" }, "CustomLanguageCode": { - "shape": "__stringMin3Max3PatternAZaZ3", + "shape": "__stringPatternAZaZ23AZaZ", "locationName": "customLanguageCode" }, "LanguageCode": { @@ -1568,7 +1568,7 @@ "locationName": "captionSelectorName" }, "CustomLanguageCode": { - "shape": "__stringMin3Max3PatternAZaZ3", + "shape": "__stringPatternAZaZ23AZaZ", "locationName": "customLanguageCode" }, "DestinationSettings": { @@ -1589,7 +1589,7 @@ "type": "structure", "members": { "CustomLanguageCode": { - "shape": "__stringMin3Max3PatternAZaZ3", + "shape": "__stringPatternAZaZ23AZaZ", "locationName": "customLanguageCode" }, "DestinationSettings": { @@ -1736,6 +1736,19 @@ } } }, + "CmafAdditionalManifest": { + "type": "structure", + "members": { + "ManifestNameModifier": { + "shape": "__stringMin1", + "locationName": "manifestNameModifier" + }, + "SelectedOutputs": { + "shape": "__listOf__stringMin1", + "locationName": "selectedOutputs" + } + } + }, "CmafClientCache": { "type": "string", "enum": [ @@ -1789,6 +1802,10 @@ "CmafGroupSettings": { "type": "structure", "members": { + "AdditionalManifests": { + "shape": "__listOfCmafAdditionalManifest", + "locationName": "additionalManifests" + }, "BaseUrl": { "shape": "__string", "locationName": "baseUrl" @@ -2030,6 +2047,10 @@ "Mp4Settings": { "shape": "Mp4Settings", "locationName": "mp4Settings" + }, + "MpdSettings": { + "shape": "MpdSettings", + "locationName": "mpdSettings" } } }, @@ -2248,6 +2269,19 @@ } } }, + "DashAdditionalManifest": { + "type": "structure", + "members": { + "ManifestNameModifier": { + "shape": "__stringMin1", + "locationName": "manifestNameModifier" + }, + "SelectedOutputs": { + "shape": "__listOf__stringMin1", + "locationName": "selectedOutputs" + } + } + }, "DashIsoEncryptionSettings": { "type": "structure", "members": { @@ -2264,6 +2298,10 @@ "DashIsoGroupSettings": { "type": "structure", "members": { + "AdditionalManifests": { + "shape": "__listOfDashAdditionalManifest", + "locationName": "additionalManifests" + }, "BaseUrl": { "shape": "__string", "locationName": "baseUrl" @@ -2512,6 +2550,50 @@ "members": { } }, + "DolbyVision": { + "type": "structure", + "members": { + "L6Metadata": { + "shape": "DolbyVisionLevel6Metadata", + "locationName": "l6Metadata" + }, + "L6Mode": { + "shape": "DolbyVisionLevel6Mode", + "locationName": "l6Mode" + }, + "Profile": { + "shape": "DolbyVisionProfile", + "locationName": "profile" + } + } + }, + "DolbyVisionLevel6Metadata": { + "type": "structure", + "members": { + "MaxCll": { + "shape": "__integerMin0Max65535", + "locationName": "maxCll" + }, + "MaxFall": { + "shape": "__integerMin0Max65535", + "locationName": "maxFall" + } + } + }, + "DolbyVisionLevel6Mode": { + "type": "string", + "enum": [ + "PASSTHROUGH", + "RECALCULATE", + "SPECIFY" + ] + }, + "DolbyVisionProfile": { + "type": "string", + "enum": [ + "PROFILE_5" + ] + }, "DropFrameTimecode": { "type": "string", "enum": [ @@ -4117,6 +4199,19 @@ "ELEMENTAL_SCTE35" ] }, + "HlsAdditionalManifest": { + "type": "structure", + "members": { + "ManifestNameModifier": { + "shape": "__stringMin1", + "locationName": "manifestNameModifier" + }, + "SelectedOutputs": { + "shape": "__listOf__stringMin1", + "locationName": 
"selectedOutputs" + } + } + }, "HlsAudioOnlyContainer": { "type": "string", "enum": [ @@ -4230,6 +4325,10 @@ "shape": "__listOfHlsAdMarkers", "locationName": "adMarkers" }, + "AdditionalManifests": { + "shape": "__listOfHlsAdditionalManifest", + "locationName": "additionalManifests" + }, "BaseUrl": { "shape": "__string", "locationName": "baseUrl" @@ -5950,6 +6049,44 @@ } } }, + "MpdCaptionContainerType": { + "type": "string", + "enum": [ + "RAW", + "FRAGMENTED_MP4" + ] + }, + "MpdScte35Esam": { + "type": "string", + "enum": [ + "INSERT", + "NONE" + ] + }, + "MpdScte35Source": { + "type": "string", + "enum": [ + "PASSTHROUGH", + "NONE" + ] + }, + "MpdSettings": { + "type": "structure", + "members": { + "CaptionContainerType": { + "shape": "MpdCaptionContainerType", + "locationName": "captionContainerType" + }, + "Scte35Esam": { + "shape": "MpdScte35Esam", + "locationName": "scte35Esam" + }, + "Scte35Source": { + "shape": "MpdScte35Source", + "locationName": "scte35Source" + } + } + }, "Mpeg2AdaptiveQuantization": { "type": "string", "enum": [ @@ -6217,6 +6354,19 @@ "ENABLED" ] }, + "MsSmoothAdditionalManifest": { + "type": "structure", + "members": { + "ManifestNameModifier": { + "shape": "__stringMin1", + "locationName": "manifestNameModifier" + }, + "SelectedOutputs": { + "shape": "__listOf__stringMin1", + "locationName": "selectedOutputs" + } + } + }, "MsSmoothAudioDeduplication": { "type": "string", "enum": [ @@ -6236,6 +6386,10 @@ "MsSmoothGroupSettings": { "type": "structure", "members": { + "AdditionalManifests": { + "shape": "__listOfMsSmoothAdditionalManifest", + "locationName": "additionalManifests" + }, "AudioDeduplication": { "shape": "MsSmoothAudioDeduplication", "locationName": "audioDeduplication" @@ -6895,9 +7049,22 @@ "PASSTHROUGH" ] }, + "S3DestinationAccessControl": { + "type": "structure", + "members": { + "CannedAcl": { + "shape": "S3ObjectCannedAcl", + "locationName": "cannedAcl" + } + } + }, "S3DestinationSettings": { "type": "structure", "members": { + "AccessControl": { + "shape": "S3DestinationAccessControl", + "locationName": "accessControl" + }, "Encryption": { "shape": "S3EncryptionSettings", "locationName": "encryption" @@ -6917,6 +7084,15 @@ } } }, + "S3ObjectCannedAcl": { + "type": "string", + "enum": [ + "PUBLIC_READ", + "AUTHENTICATED_READ", + "BUCKET_OWNER_READ", + "BUCKET_OWNER_FULL_CONTROL" + ] + }, "S3ServerSideEncryptionType": { "type": "string", "enum": [ @@ -6936,6 +7112,7 @@ "enum": [ "FRAMERATE_23_97", "FRAMERATE_24", + "FRAMERATE_25", "FRAMERATE_29_97_DROPFRAME", "FRAMERATE_29_97_NON_DROPFRAME" ] @@ -7503,6 +7680,10 @@ "shape": "Deinterlacer", "locationName": "deinterlacer" }, + "DolbyVision": { + "shape": "DolbyVision", + "locationName": "dolbyVision" + }, "ImageInserter": { "shape": "ImageInserter", "locationName": "imageInserter" @@ -7981,6 +8162,18 @@ "shape": "CaptionDescriptionPreset" } }, + "__listOfCmafAdditionalManifest": { + "type": "list", + "member": { + "shape": "CmafAdditionalManifest" + } + }, + "__listOfDashAdditionalManifest": { + "type": "list", + "member": { + "shape": "DashAdditionalManifest" + } + }, "__listOfEndpoint": { "type": "list", "member": { @@ -7993,6 +8186,12 @@ "shape": "HlsAdMarkers" } }, + "__listOfHlsAdditionalManifest": { + "type": "list", + "member": { + "shape": "HlsAdditionalManifest" + } + }, "__listOfHlsCaptionLanguageMapping": { "type": "list", "member": { @@ -8041,6 +8240,12 @@ "shape": "JobTemplate" } }, + "__listOfMsSmoothAdditionalManifest": { + "type": "list", + "member": { + "shape": 
"MsSmoothAdditionalManifest" + } + }, "__listOfOutput": { "type": "list", "member": { @@ -8286,6 +8491,10 @@ "type": "string", "pattern": "^[A-Za-z0-9]{32}$" }, + "__stringPatternAZaZ23AZaZ": { + "type": "string", + "pattern": "^[A-Za-z]{2,3}(-[A-Za-z-]+)?$" + }, "__stringPatternArnAwsUsGovAcm": { "type": "string", "pattern": "^arn:aws(-us-gov)?:acm:" @@ -8300,11 +8509,11 @@ }, "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEE": { "type": "string", - "pattern": "^(http|https|s3)://([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" + "pattern": "^(http|https|s3)://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE])))$" }, "__stringPatternHttpHttpsS3MM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMOOVVMMTTSSMM2TTWWMMVVAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8WWEEBBMMLLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMXXMMLL": { "type": "string", - "pattern": "^(http|https|s3)://([^\\/]+\\/)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))$" + "pattern": 
"^(http|https|s3)://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vV]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL])))$" }, "__stringPatternHttps": { "type": "string", diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index c9b3741fde0..46b86087f71 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -206,9 +206,9 @@ } }, "AudioLanguageCodeControl": { - "base": "Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output to follow the ISO 639 language code of the input. The language specified for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT is selected but there is no ISO 639 language code specified by the input.", + "base": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify.", "refs": { - "AudioDescription$LanguageCodeControl": "Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output to follow the ISO 639 language code of the input. The language specified for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT is selected but there is no ISO 639 language code specified by the input." + "AudioDescription$LanguageCodeControl": "Specify which source for language code takes precedence for this audio track. When you choose Follow input (FOLLOW_INPUT), the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code (languageCode or customLanguageCode). When you choose Use configured (USE_CONFIGURED), the service uses the language code that you specify." } }, "AudioNormalizationAlgorithm": { @@ -385,6 +385,12 @@ "RemixSettings$ChannelMapping": "Channel mapping (ChannelMapping) contains the group of fields that hold the remixing value for each channel. Units are in dB. Acceptable values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification)." } }, + "CmafAdditionalManifest": { + "base": "Specify the details for each pair of HLS and DASH additional manifests that you want the service to generate for this CMAF output group. 
Each pair of manifests can reference a different subset of outputs in the group.", + "refs": { + "__listOfCmafAdditionalManifest$member": null + } + }, "CmafClientCache": { "base": "When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client from saving media segments for later replay.", "refs": { @@ -564,6 +570,12 @@ "refs": { } }, + "DashAdditionalManifest": { + "base": "Specify the details for each additional DASH manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.", + "refs": { + "__listOfDashAdditionalManifest$member": null + } + }, "DashIsoEncryptionSettings": { "base": "Specifies DRM settings for DASH outputs.", "refs": { @@ -702,6 +714,30 @@ "refs": { } }, + "DolbyVision": { + "base": "Settings for Dolby Vision", + "refs": { + "VideoPreprocessor$DolbyVision": "Enable Dolby Vision feature to produce Dolby Vision compatible video output." + } + }, + "DolbyVisionLevel6Metadata": { + "base": "Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override the MaxCLL and MaxFALL values in your input with new values.", + "refs": { + "DolbyVision$L6Metadata": "Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override the MaxCLL and MaxFALL values in your input with new values." + } + }, + "DolbyVisionLevel6Mode": { + "base": "Use Dolby Vision Mode to choose how the service will handle Dolby Vision MaxCLL and MaxFALL properies.", + "refs": { + "DolbyVision$L6Mode": "Use Dolby Vision Mode to choose how the service will handle Dolby Vision MaxCLL and MaxFALL properies." + } + }, + "DolbyVisionProfile": { + "base": "In the current MediaConvert implementation, the Dolby Vision profile is always 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame interleaved data.", + "refs": { + "DolbyVision$Profile": "In the current MediaConvert implementation, the Dolby Vision profile is always 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame interleaved data." + } + }, "DropFrameTimecode": { "base": "Applies only to 29.97 fps outputs. When this feature is enabled, the service will use drop-frame timecode on outputs. If it is not possible to use drop-frame timecode, the system will fall back to non-drop-frame. This setting is enabled by default when Timecode insertion (TimecodeInsertion) is enabled.", "refs": { @@ -1361,9 +1397,9 @@ } }, "H265WriteMp4PackagingType": { - "base": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. For file MP4 outputs, choosing HVC1 can create video that doesn't work properly with some downstream systems and video players. Choose HVC1 to mark your output as HVC1. This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples.", + "base": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting only with CMAF or DASH outputs, or with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose HVC1 to mark your output as HVC1. 
This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. For MP4 outputs, when you choose HVC1, your output video might not work properly with some downstream systems and video players. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples.", "refs": { - "H265Settings$WriteMp4PackagingType": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. For file MP4 outputs, choosing HVC1 can create video that doesn't work properly with some downstream systems and video players. Choose HVC1 to mark your output as HVC1. This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples." + "H265Settings$WriteMp4PackagingType": "If the location of parameter set NAL units doesn't matter in your workflow, ignore this setting. Use this setting only with CMAF or DASH outputs, or with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose HVC1 to mark your output as HVC1. This makes your output compliant with the following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these outputs, the service stores parameter set NAL units in the sample headers but not in the samples directly. For MP4 outputs, when you choose HVC1, your output video might not work properly with some downstream systems and video players. The service defaults to marking your output as HEV1. For these outputs, the service writes parameter set NAL units directly into the samples." } }, "Hdr10Metadata": { @@ -1379,6 +1415,12 @@ "__listOfHlsAdMarkers$member": null } }, + "HlsAdditionalManifest": { + "base": "Specify the details for each additional HLS manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.", + "refs": { + "__listOfHlsAdditionalManifest$member": null + } + }, "HlsAudioOnlyContainer": { "base": "Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream (M2TS) to create a file in an MPEG2-TS container. Keep the default value Automatic (AUTOMATIC) to create a raw audio-only file with no container. Regardless of the value that you specify here, if this output has video, the service will place outputs into an MPEG2-TS container.", "refs": { @@ -1936,6 +1978,30 @@ "ContainerSettings$Mp4Settings": "Settings for MP4 container. You can create audio-only AAC outputs with this container." } }, + "MpdCaptionContainerType": { + "base": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. 
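Aside: to show how the new Dolby Vision shapes introduced above surface in this SDK, here is a minimal sketch of building a VideoPreprocessor that enables them. It assumes the usual v2 code generation for this model (struct fields DolbyVision, Profile, L6Mode, L6Metadata, MaxCll, MaxFall and enum constants such as DolbyVisionProfileProfile5); the numeric values are placeholders, not recommendations.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	// Placeholder nit values; the model above bounds MaxCll/MaxFall to 0-65535.
	preprocessor := mediaconvert.VideoPreprocessor{
		DolbyVision: &mediaconvert.DolbyVision{
			// Assumed enum constant names; the model only fixes the values
			// PROFILE_5 and SPECIFY.
			Profile: mediaconvert.DolbyVisionProfileProfile5,
			L6Mode:  mediaconvert.DolbyVisionLevel6ModeSpecify,
			L6Metadata: &mediaconvert.DolbyVisionLevel6Metadata{
				MaxCll:  aws.Int64(1000),
				MaxFall: aws.Int64(400),
			},
		},
	}
	fmt.Printf("%+v\n", preprocessor)
}
```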
This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files.", + "refs": { + "MpdSettings$CaptionContainerType": "Use this setting only in DASH output groups that include sidecar TTML or IMSC captions. You specify sidecar captions in a separate output from your audio and video. Choose Raw (RAW) for captions in a single XML file in a raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in XML format contained within fragmented MP4 files. This set of fragmented MP4 files is separate from your video and audio fragmented MP4 files." + } + }, + "MpdScte35Esam": { + "base": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml).", + "refs": { + "MpdSettings$Scte35Esam": "Use this setting only when you specify SCTE-35 markers from ESAM. Choose INSERT to put SCTE-35 markers in this output at the insertion points that you specify in an ESAM XML document. Provide the document in the setting SCC XML (sccXml)." + } + }, + "MpdScte35Source": { + "base": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output.", + "refs": { + "MpdSettings$Scte35Source": "Ignore this setting unless you have SCTE-35 markers in your input video file. Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear in your input to also appear in this output. Choose None (NONE) if you don't want those SCTE-35 markers in this output." + } + }, + "MpdSettings": { + "base": "Settings for MP4 segments in DASH", + "refs": { + "ContainerSettings$MpdSettings": "Settings for MP4 segments in DASH" + } + }, "Mpeg2AdaptiveQuantization": { "base": "Adaptive quantization. Allows intra-frame quantizers to vary to improve visual quality.", "refs": { @@ -2050,6 +2116,12 @@ "Mpeg2Settings$TemporalAdaptiveQuantization": "Adjust quantization within each frame based on temporal variation of content complexity." } }, + "MsSmoothAdditionalManifest": { + "base": "Specify the details for each additional Microsoft Smooth Streaming manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.", + "refs": { + "__listOfMsSmoothAdditionalManifest$member": null + } + }, "MsSmoothAudioDeduplication": { "base": "COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across a Microsoft Smooth output group into a single audio stream.", "refs": { @@ -2336,6 +2408,12 @@ "VideoDescription$RespondToAfd": "Use Respond to AFD (RespondToAfd) to specify how the service changes the video itself in response to AFD values in the input. * Choose Respond to clip the input video frame according to the AFD value, input display aspect ratio, and output display aspect ratio. * Choose Passthrough to include the input AFD values. Do not choose this when AfdSignaling is set to (NONE). A preferred implementation of this workflow is to set RespondToAfd to (NONE) and set AfdSignaling to (AUTO). * Choose None to remove all input AFD values from this output." } }, + "S3DestinationAccessControl": { + "base": "Optional. Have MediaConvert automatically apply Amazon S3 access control for the outputs in this output group. 
When you don't use this setting, S3 automatically applies the default access control list PRIVATE.", + "refs": { + "S3DestinationSettings$AccessControl": "Optional. Have MediaConvert automatically apply Amazon S3 access control for the outputs in this output group. When you don't use this setting, S3 automatically applies the default access control list PRIVATE." + } + }, "S3DestinationSettings": { "base": "Settings associated with S3 destination", "refs": { @@ -2348,6 +2426,12 @@ "S3DestinationSettings$Encryption": "Settings for how your job outputs are encrypted as they are uploaded to Amazon S3." } }, + "S3ObjectCannedAcl": { + "base": "Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.", + "refs": { + "S3DestinationAccessControl$CannedAcl": "Choose an Amazon S3 canned ACL for MediaConvert to apply to this output." + } + }, "S3ServerSideEncryptionType": { "base": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn).", "refs": { @@ -2846,6 +2930,8 @@ "__integerMin0Max65535": { "base": null, "refs": { + "DolbyVisionLevel6Metadata$MaxCll": "Maximum Content Light Level. Static HDR metadata that corresponds to the brightest pixel in the entire stream. Measured in nits.", + "DolbyVisionLevel6Metadata$MaxFall": "Maximum Frame-Average Light Level. Static HDR metadata that corresponds to the highest frame-average brightness in the entire stream. Measured in nits.", "DvbNitSettings$NetworkId": "The numeric value placed in the Network Information Table (NIT).", "Hdr10Metadata$MaxContentLightLevel": "Maximum light level among all samples in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.", "Hdr10Metadata$MaxFrameAverageLightLevel": "Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.", @@ -3263,6 +3349,18 @@ "PresetSettings$CaptionDescriptions": "Caption settings for this preset. There can be multiple caption settings in a single output." } }, + "__listOfCmafAdditionalManifest": { + "base": null, + "refs": { + "CmafGroupSettings$AdditionalManifests": "By default, the service creates one top-level .m3u8 HLS manifest and one top -level .mpd DASH manifest for each CMAF output group in your job. These default manifests reference every output in the output group. To create additional top-level manifests that reference a subset of the outputs in the output group, specify a list of them here. For each additional manifest that you specify, the service creates one HLS manifest and one DASH manifest." 
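For context, a hedged sketch of how the new AdditionalManifests members described above might be populated for an HLS output group. It assumes the generated Go field names mirror the shape members shown here (AdditionalManifests, ManifestNameModifier, SelectedOutputs); the destination and output names are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	hls := mediaconvert.HlsGroupSettings{
		Destination: aws.String("s3://my-bucket/film-name"), // placeholder destination
		AdditionalManifests: []mediaconvert.HlsAdditionalManifest{
			{
				// Yields film-name-no-premium.m3u8 next to the default film-name.m3u8.
				ManifestNameModifier: aws.String("-no-premium"),
				// Placeholder output names; only these outputs are referenced
				// by the additional top-level manifest.
				SelectedOutputs: []string{"sd-output", "audio-only-output"},
			},
		},
	}
	fmt.Printf("%+v\n", hls)
}
```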
+ } + }, + "__listOfDashAdditionalManifest": { + "base": null, + "refs": { + "DashIsoGroupSettings$AdditionalManifests": "By default, the service creates one .mpd DASH manifest for each DASH ISO output group in your job. This default manifest references every output in the output group. To create additional DASH manifests that reference a subset of the outputs in the output group, specify a list of them here." + } + }, "__listOfEndpoint": { "base": null, "refs": { @@ -3275,6 +3373,12 @@ "HlsGroupSettings$AdMarkers": "Choose one or more ad marker types to decorate your Apple HLS manifest. This setting does not determine whether SCTE-35 markers appear in the outputs themselves." } }, + "__listOfHlsAdditionalManifest": { + "base": null, + "refs": { + "HlsGroupSettings$AdditionalManifests": "By default, the service creates one top-level .m3u8 HLS manifest for each HLS output group in your job. This default manifest references every output in the output group. To create additional top-level manifests that reference a subset of the outputs in the output group, specify a list of them here." + } + }, "__listOfHlsCaptionLanguageMapping": { "base": null, "refs": { @@ -3324,6 +3428,12 @@ "ListJobTemplatesResponse$JobTemplates": "List of Job templates." } }, + "__listOfMsSmoothAdditionalManifest": { + "base": null, + "refs": { + "MsSmoothGroupSettings$AdditionalManifests": "By default, the service creates one .ism Microsoft Smooth Streaming manifest for each Microsoft Smooth Streaming output group in your job. This default manifest references every output in the output group. To create additional manifests that reference a subset of the outputs in the output group, specify a list of them here." + } + }, "__listOfOutput": { "base": null, "refs": { @@ -3405,7 +3515,11 @@ "__listOf__stringMin1": { "base": null, "refs": { - "AudioSelectorGroup$AudioSelectorNames": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group." + "AudioSelectorGroup$AudioSelectorNames": "Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., \"Audio Selector 1\"). The audio selector name parameter can be repeated to add any number of audio selectors to the group.", + "CmafAdditionalManifest$SelectedOutputs": "Specify the outputs that you want this additional top-level manifest to reference.", + "DashAdditionalManifest$SelectedOutputs": "Specify the outputs that you want this additional top-level manifest to reference.", + "HlsAdditionalManifest$SelectedOutputs": "Specify the outputs that you want this additional top-level manifest to reference.", + "MsSmoothAdditionalManifest$SelectedOutputs": "Specify the outputs that you want this additional top-level manifest to reference." } }, "__listOf__stringMin36Max36Pattern09aFAF809aFAF409aFAF409aFAF409aFAF12": { @@ -3564,6 +3678,10 @@ "base": null, "refs": { "CaptionDescription$CaptionSelectorName": "Specifies which \"Caption Selector\":#inputs-caption_selector to use from each input when generating captions. 
The name should be of the format \"Caption Selector \", which denotes that the Nth Caption Selector will be used from each input.", + "CmafAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your HLS group is film-name.m3u8. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. For HLS output groups, specify a manifestNameModifier that is different from the nameModifier of the output. The service uses the output name modifier to create unique names for the individual variant manifests.", + "DashAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your DASH group is film-name.mpd. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.mpd.", + "HlsAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your HLS group is film-name.m3u8. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. For HLS output groups, specify a manifestNameModifier that is different from the nameModifier of the output. The service uses the output name modifier to create unique names for the individual variant manifests.", + "MsSmoothAdditionalManifest$ManifestNameModifier": "Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your Microsoft Smooth group is film-name.ismv. If you enter \"-no-premium\" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.ismv.", "Output$NameModifier": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier (NameModifier) is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group.", "__listOf__stringMin1$member": null } @@ -3643,12 +3761,9 @@ "__stringMin3Max3PatternAZaZ3": { "base": null, "refs": { - "AudioDescription$CustomLanguageCode": "Specify the language for this audio output track, using the ISO 639-2 or ISO 639-3 three-letter language code. 
The language specified will be used when 'Follow Input Language Code' is not selected or when 'Follow Input Language Code' is selected but there is no ISO 639 language code specified by the input.", "AudioSelector$CustomLanguageCode": "Selects a specific language code from within an audio source, using the ISO 639-2 or ISO 639-3 three-letter language code", - "CaptionDescription$CustomLanguageCode": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.", - "CaptionDescriptionPreset$CustomLanguageCode": "Indicates the language of the caption output track, using the ISO 639-2 or ISO 639-3 three-letter language code. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.", "CaptionSelector$CustomLanguageCode": "The specific language to extract from source, using the ISO 639-2 or ISO 639-3 three-letter language code. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.", - "HlsCaptionLanguageMapping$CustomLanguageCode": "Specify the language for this caption channel, using the ISO 639-2 or ISO 639-3 three-letter language code" + "HlsCaptionLanguageMapping$CustomLanguageCode": "Specify the language for this captions channel, using the ISO 639-2 or ISO 639-3 three-letter language code" } }, "__stringMin9Max19PatternAZ26EastWestCentralNorthSouthEastWest1912": { @@ -3703,6 +3818,14 @@ "StaticKeyProvider$StaticKeyValue": "Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value (StaticKeyValue)." } }, + "__stringPatternAZaZ23AZaZ": { + "base": null, + "refs": { + "AudioDescription$CustomLanguageCode": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control (AudioLanguageCodeControl) to Use configured (USE_CONFIGURED). The service also uses your specified custom language code when you set Language code control (AudioLanguageCodeControl) to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.", + "CaptionDescription$CustomLanguageCode": "Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. 
For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.", + "CaptionDescriptionPreset$CustomLanguageCode": "Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." + } + }, "__stringPatternArnAwsUsGovAcm": { "base": null, "refs": { diff --git a/models/apis/mediastore/2017-09-01/api-2.json b/models/apis/mediastore/2017-09-01/api-2.json index eead68259e3..f8ada845def 100644 --- a/models/apis/mediastore/2017-09-01/api-2.json +++ b/models/apis/mediastore/2017-09-01/api-2.json @@ -457,7 +457,8 @@ "Endpoint":{ "type":"string", "max":255, - "min":1 + "min":1, + "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+" }, "ErrorMessage":{ "type":"string", @@ -675,6 +676,7 @@ }, "Tag":{ "type":"structure", + "required":["Key"], "members":{ "Key":{"shape":"TagKey"}, "Value":{"shape":"TagValue"} diff --git a/models/apis/mediastore/2017-09-01/docs-2.json b/models/apis/mediastore/2017-09-01/docs-2.json index 3d1466d1002..4b6268e38ae 100644 --- a/models/apis/mediastore/2017-09-01/docs-2.json +++ b/models/apis/mediastore/2017-09-01/docs-2.json @@ -18,7 +18,7 @@ "PutLifecyclePolicy": "

Writes an object lifecycle policy to a container. If the container already has an object lifecycle policy, the service replaces the existing policy with the new policy. It takes up to 20 minutes for the change to take effect.

For information about how to construct an object lifecycle policy, see Components of an Object Lifecycle Policy.

", "StartAccessLogging": "

Starts access logging on the specified container. When you enable access logging on a container, MediaStore delivers access logs for objects stored in that container to Amazon CloudWatch Logs.

", "StopAccessLogging": "

Stops access logging on the specified container. When you stop access logging on a container, MediaStore stops sending access logs to Amazon CloudWatch Logs. These access logs are not saved and are not retrievable.

", - "TagResource": "

Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

", + "TagResource": "

Adds tags to the specified AWS Elemental MediaStore container. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be \"customer\" and the tag value might be \"companyA.\" You can specify one or more tags to add to each container. You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.
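Since the Tag shape in the api-2.json change above now marks Key as required, here is a minimal sketch of tagging a container with this release's request/Send calling pattern. The Resource and Tags field names are assumed to follow the MediaStore model; the container ARN and tag values are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/mediastore"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := mediastore.New(cfg)

	// Key is now required on every Tag; Value stays optional.
	req := svc.TagResourceRequest(&mediastore.TagResourceInput{
		Resource: aws.String("arn:aws:mediastore:us-west-2:111122223333:container/movies"), // placeholder ARN
		Tags: []mediastore.Tag{
			{Key: aws.String("customer"), Value: aws.String("companyA")},
			{Key: aws.String("priority"), Value: aws.String("High")},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```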

", "UntagResource": "

Removes tags from the specified container. You can specify one or more tags to remove.

" }, "shapes": { @@ -374,7 +374,7 @@ } }, "Tag": { - "base": "

A collection of tags associated with a container. Each tag consists of a key:value pair, which can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

", + "base": "

A collection of tags associated with a container. Each tag consists of a key:value pair, which can be anything you define. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

", "refs": { "TagList$member": null } @@ -395,7 +395,7 @@ "TagList": { "base": null, "refs": { - "CreateContainerInput$Tags": "

An array of key:value pairs that you define. These values can be anything that you want. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

", + "CreateContainerInput$Tags": "

An array of key:value pairs that you define. These values can be anything that you want. Typically, the tag key represents a category (such as \"environment\") and the tag value represents a specific value within that category (such as \"test,\" \"development,\" or \"production\"). You can add up to 50 tags to each container. For more information about tagging, including naming and usage conventions, see Tagging Resources in MediaStore.

", "ListTagsForResourceOutput$Tags": "

An array of key:value pairs that are assigned to the container.

", "TagResourceInput$Tags": "

An array of key:value pairs that you want to add to the container. You need to specify only the tags that you want to add or update. For example, suppose a container already has two tags (customer:CompanyA and priority:High). You want to change the priority tag and also add a third tag (type:Contract). For TagResource, you specify the following tags: priority:Medium, type:Contract. The result is that your container has three tags: customer:CompanyA, priority:Medium, and type:Contract.

" } diff --git a/models/apis/meteringmarketplace/2016-01-14/api-2.json b/models/apis/meteringmarketplace/2016-01-14/api-2.json index 1aa1635c974..321b693d018 100644 --- a/models/apis/meteringmarketplace/2016-01-14/api-2.json +++ b/models/apis/meteringmarketplace/2016-01-14/api-2.json @@ -46,7 +46,8 @@ {"shape":"InvalidEndpointRegionException"}, {"shape":"TimestampOutOfBoundsException"}, {"shape":"DuplicateRequestException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"CustomerNotEntitledException"} ] }, "RegisterUsage":{ diff --git a/models/apis/meteringmarketplace/2016-01-14/docs-2.json b/models/apis/meteringmarketplace/2016-01-14/docs-2.json index 56d38c9d0e9..86cfbf42dbb 100644 --- a/models/apis/meteringmarketplace/2016-01-14/docs-2.json +++ b/models/apis/meteringmarketplace/2016-01-14/docs-2.json @@ -1,10 +1,10 @@ { "version": "2.0", - "service": "AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

Submitting Metering Records

Accepting New Customers

Entitlement and Metering for Paid Container Products

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide .

", + "service": "AWS Marketplace Metering Service

This reference provides descriptions of the low-level AWS Marketplace Metering Service API.

AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.

Submitting Metering Records

Accepting New Customers

Entitlement and Metering for Paid Container Products

BatchMeterUsage API calls are captured by AWS CloudTrail. You can use CloudTrail to verify that the SaaS metering records that you sent are accurate by searching for records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit records over time. For more information, see the AWS CloudTrail User Guide.

", "operations": { "BatchMeterUsage": "

BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to post metering records for a set of customers.

For identical requests, the API is idempotent; requests can be retried with the same records or a subset of the input records.

Every request to BatchMeterUsage is for one product. If you need to meter usage for multiple products, you must make multiple calls to BatchMeterUsage.

BatchMeterUsage can process up to 25 UsageRecords at a time.

", "MeterUsage": "

API to emit metering records. For identical requests, the API is idempotent. It simply returns the metering record ID.

MeterUsage is authenticated on the buyer's AWS account, generally when running from an EC2 instance on the AWS Marketplace.

", - "RegisterUsage": "

Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Calling RegisterUsage from containers running outside of ECS is not currently supported. Free and BYOL products for ECS aren't required to call RegisterUsage, but you may choose to do so if you would like to receive usage data in your seller reports. The sections below explain the behavior of RegisterUsage. RegisterUsage performs two primary functions: metering and entitlement.

", + "RegisterUsage": "

Paid container software products sold through AWS Marketplace must integrate with the AWS Marketplace Metering Service and call the RegisterUsage operation for software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you may choose to do so if you would like to receive usage data in your seller reports. The sections below explain the behavior of RegisterUsage. RegisterUsage performs two primary functions: metering and entitlement.
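The api-2.json change above also adds CustomerNotEntitledException to MeterUsage's error set. A hedged sketch of detecting it, assuming the marketplacemetering client package and the pre-GA awserr error interface; the product code, dimension, and quantity are placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/marketplacemetering"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := marketplacemetering.New(cfg)

	req := svc.MeterUsageRequest(&marketplacemetering.MeterUsageInput{
		ProductCode:    aws.String("example-product-code"), // placeholder
		UsageDimension: aws.String("ReadUnits"),            // placeholder dimension
		UsageQuantity:  aws.Int64(1),
		Timestamp:      aws.Time(time.Now()),
		DryRun:         aws.Bool(false),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		// New in this model update: MeterUsage can report that the buyer
		// is not entitled to the product.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "CustomerNotEntitledException" {
			log.Println("customer not entitled:", aerr.Message())
			return
		}
		log.Fatal(err)
	}
}
```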

", "ResolveCustomer": "

ResolveCustomer is called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a registration token through their browser. The registration token is resolved through this API to obtain a CustomerIdentifier and product code.

" }, "shapes": { diff --git a/models/apis/migrationhub-config/2019-06-30/api-2.json b/models/apis/migrationhub-config/2019-06-30/api-2.json new file mode 100644 index 00000000000..f2ea8f956b7 --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/api-2.json @@ -0,0 +1,207 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-06-30", + "endpointPrefix":"migrationhub-config", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Migration Hub Config", + "serviceId":"MigrationHub Config", + "signatureVersion":"v4", + "signingName":"mgh", + "targetPrefix":"AWSMigrationHubMultiAccountService", + "uid":"migrationhub-config-2019-06-30" + }, + "operations":{ + "CreateHomeRegionControl":{ + "name":"CreateHomeRegionControl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateHomeRegionControlRequest"}, + "output":{"shape":"CreateHomeRegionControlResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DryRunOperation"}, + {"shape":"InvalidInputException"} + ] + }, + "DescribeHomeRegionControls":{ + "name":"DescribeHomeRegionControls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeHomeRegionControlsRequest"}, + "output":{"shape":"DescribeHomeRegionControlsResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"} + ] + }, + "GetHomeRegion":{ + "name":"GetHomeRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetHomeRegionRequest"}, + "output":{"shape":"GetHomeRegionResult"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidInputException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ControlId":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^hrc-[a-z0-9]{12}$" + }, + "CreateHomeRegionControlRequest":{ + "type":"structure", + "required":[ + "HomeRegion", + "Target" + ], + "members":{ + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "DryRun":{"shape":"DryRun"} + } + }, + "CreateHomeRegionControlResult":{ + "type":"structure", + "members":{ + "HomeRegionControl":{"shape":"HomeRegionControl"} + } + }, + "DescribeHomeRegionControlsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "DescribeHomeRegionControlsRequest":{ + "type":"structure", + "members":{ + "ControlId":{"shape":"ControlId"}, + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "MaxResults":{"shape":"DescribeHomeRegionControlsMaxResults"}, + "NextToken":{"shape":"Token"} + } + }, + "DescribeHomeRegionControlsResult":{ + "type":"structure", + "members":{ + "HomeRegionControls":{"shape":"HomeRegionControls"}, + "NextToken":{"shape":"Token"} + } + }, + "DryRun":{"type":"boolean"}, + "DryRunOperation":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "ErrorMessage":{"type":"string"}, + "GetHomeRegionRequest":{ + "type":"structure", + "members":{ + } + }, + "GetHomeRegionResult":{ + "type":"structure", + "members":{ + "HomeRegion":{"shape":"HomeRegion"} + } + }, + "HomeRegion":{ + "type":"string", + "max":50, + "min":1, + "pattern":"^([a-z]+)-([a-z]+)-([0-9]+)$" + }, + 
"HomeRegionControl":{ + "type":"structure", + "members":{ + "ControlId":{"shape":"ControlId"}, + "HomeRegion":{"shape":"HomeRegion"}, + "Target":{"shape":"Target"}, + "RequestedTime":{"shape":"RequestedTime"} + } + }, + "HomeRegionControls":{ + "type":"list", + "member":{"shape":"HomeRegionControl"}, + "max":100 + }, + "InternalServerError":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "InvalidInputException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true + }, + "RequestedTime":{"type":"timestamp"}, + "ServiceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "exception":true, + "fault":true + }, + "Target":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{"shape":"TargetType"}, + "Id":{"shape":"TargetId"} + } + }, + "TargetId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^\\d{12}$" + }, + "TargetType":{ + "type":"string", + "enum":["ACCOUNT"] + }, + "Token":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"^[a-zA-Z0-9\\/\\+\\=]{0,2048}$" + } + } +} diff --git a/models/apis/migrationhub-config/2019-06-30/docs-2.json b/models/apis/migrationhub-config/2019-06-30/docs-2.json new file mode 100644 index 00000000000..e6e91ce1d19 --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/docs-2.json @@ -0,0 +1,150 @@ +{ + "version": "2.0", + "service": "

The AWS Migration Hub home region APIs are available specifically for working with your Migration Hub home region. You can use these APIs to determine a home region, as well as to create and work with controls that describe the home region.

You can use these APIs within your home region only. If you call these APIs from outside your home region, your calls are rejected, except for the ability to register your agents and connectors.

You must call GetHomeRegion at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.

The StartDataCollection API call in AWS Application Discovery Service allows your agents and connectors to begin collecting data that flows directly into the home region, and it prevents data collection information from being sent outside the home region.

For specific API usage, see the sections that follow in this AWS Migration Hub Home Region API reference.

The Migration Hub Home Region APIs do not support AWS Organizations.

", + "operations": { + "CreateHomeRegionControl": "

This API sets up the home region for the calling account only.
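A minimal sketch of calling the new CreateHomeRegionControl operation with the migrationhubconfig client added in this release. It assumes the generated CreateHomeRegionControlInput and Target types mirror the shapes above and that the ACCOUNT enum value is exposed as a TargetTypeAccount constant; the region and account ID are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := migrationhubconfig.New(cfg)

	req := svc.CreateHomeRegionControlRequest(&migrationhubconfig.CreateHomeRegionControlInput{
		HomeRegion: aws.String("us-west-2"), // placeholder home region
		Target: &migrationhubconfig.Target{
			Type: migrationhubconfig.TargetTypeAccount, // assumed constant for the ACCOUNT enum value
			Id:   aws.String("111122223333"),           // placeholder; must be the caller's account
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("home region control: %+v\n", resp.HomeRegionControl)
}
```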

", + "DescribeHomeRegionControls": "

This API permits filtering on the ControlId, HomeRegion, and RegionControlScope fields.

", + "GetHomeRegion": "

Returns the calling account’s home region, if configured. This API is used by other AWS services to determine the regional endpoint for calling AWS Application Discovery Service and Migration Hub. You must call GetHomeRegion at least once before you call any other AWS Application Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration Hub home region.
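A small sketch of the GetHomeRegion lookup described above, assuming the pre-GA request/Send pattern and a GetHomeRegionInput type generated from the shape in api-2.json.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
)

// homeRegion looks up the account's Migration Hub home region, which the
// description above says must happen before other Migration Hub or
// Application Discovery Service calls.
func homeRegion(ctx context.Context, cfg aws.Config) (string, error) {
	svc := migrationhubconfig.New(cfg)
	req := svc.GetHomeRegionRequest(&migrationhubconfig.GetHomeRegionInput{})
	resp, err := req.Send(ctx)
	if err != nil {
		return "", err
	}
	if resp.HomeRegion == nil {
		return "", nil // no home region configured yet
	}
	return *resp.HomeRegion, nil
}
```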

" + }, + "shapes": { + "AccessDeniedException": { + "base": "

You do not have sufficient access to perform this action.

", + "refs": { + } + }, + "ControlId": { + "base": null, + "refs": { + "DescribeHomeRegionControlsRequest$ControlId": "

The ControlID is a unique identifier string of your HomeRegionControl object.

", + "HomeRegionControl$ControlId": "

A unique identifier that's generated for each home region control. It's always a string that begins with \"hrc-\" followed by 12 lowercase letters and numbers.

" + } + }, + "CreateHomeRegionControlRequest": { + "base": null, + "refs": { + } + }, + "CreateHomeRegionControlResult": { + "base": null, + "refs": { + } + }, + "DescribeHomeRegionControlsMaxResults": { + "base": null, + "refs": { + "DescribeHomeRegionControlsRequest$MaxResults": "

The maximum number of filtering results to display per page.

" + } + }, + "DescribeHomeRegionControlsRequest": { + "base": null, + "refs": { + } + }, + "DescribeHomeRegionControlsResult": { + "base": null, + "refs": { + } + }, + "DryRun": { + "base": null, + "refs": { + "CreateHomeRegionControlRequest$DryRun": "

Optional Boolean flag to indicate whether any effect should take place. When you set DryRun to true, the call makes no changes and only tests whether the caller has permission to make it.

" + } + }, + "DryRunOperation": { + "base": "

Exception raised to indicate that authorization of an action was successful, when the DryRun flag is set to true.

", + "refs": { + } + }, + "ErrorMessage": { + "base": null, + "refs": { + "AccessDeniedException$Message": null, + "DryRunOperation$Message": null, + "InternalServerError$Message": null, + "InvalidInputException$Message": null, + "ServiceUnavailableException$Message": null + } + }, + "GetHomeRegionRequest": { + "base": null, + "refs": { + } + }, + "GetHomeRegionResult": { + "base": null, + "refs": { + } + }, + "HomeRegion": { + "base": null, + "refs": { + "CreateHomeRegionControlRequest$HomeRegion": "

The name of the home region of the calling account.

", + "DescribeHomeRegionControlsRequest$HomeRegion": "

The name of the home region you'd like to view.

", + "GetHomeRegionResult$HomeRegion": "

The name of the home region of the calling account.

", + "HomeRegionControl$HomeRegion": "

The AWS Region that's been set as home region. For example, \"us-west-2\" or \"eu-central-1\" are valid home regions.

" + } + }, + "HomeRegionControl": { + "base": "

A home region control is an object that specifies the home region for an account, with some additional information. It contains a target (always of type ACCOUNT), an ID, and a time at which the home region was set.

", + "refs": { + "CreateHomeRegionControlResult$HomeRegionControl": "

This object is the HomeRegionControl object that's returned by a successful call to CreateHomeRegionControl.

", + "HomeRegionControls$member": null + } + }, + "HomeRegionControls": { + "base": null, + "refs": { + "DescribeHomeRegionControlsResult$HomeRegionControls": "

An array that contains your HomeRegionControl objects.

" + } + }, + "InternalServerError": { + "base": "

Exception raised when an internal, configuration, or dependency error is encountered.

", + "refs": { + } + }, + "InvalidInputException": { + "base": "

Exception raised when the provided input violates a policy constraint or is entered in the wrong format or data type.

", + "refs": { + } + }, + "RequestedTime": { + "base": null, + "refs": { + "HomeRegionControl$RequestedTime": "

A timestamp representing the time when the customer called CreateHomeRegionControl and set the home region for the account.

" + } + }, + "ServiceUnavailableException": { + "base": "

Exception raised when a request fails due to temporary unavailability of the service.

", + "refs": { + } + }, + "Target": { + "base": "

The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT. It applies the home region to the current ACCOUNT.

", + "refs": { + "CreateHomeRegionControlRequest$Target": "

The account for which this command sets up a home region control. The Target is always of type ACCOUNT.

", + "DescribeHomeRegionControlsRequest$Target": "

The target parameter specifies the identifier to which the home region is applied, which is always of type ACCOUNT. It applies the home region to the current ACCOUNT.

", + "HomeRegionControl$Target": "

The target parameter specifies the identifier to which the home region is applied, which is always an ACCOUNT. It applies the home region to the current ACCOUNT.

" + } + }, + "TargetId": { + "base": null, + "refs": { + "Target$Id": "

The TargetID is a 12-character identifier of the ACCOUNT for which the control was created. (This must be the current account.)

" + } + }, + "TargetType": { + "base": null, + "refs": { + "Target$Type": "

The target type is always an ACCOUNT.

" + } + }, + "Token": { + "base": null, + "refs": { + "DescribeHomeRegionControlsRequest$NextToken": "

If a NextToken was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken.

", + "DescribeHomeRegionControlsResult$NextToken": "

If a NextToken was returned by a previous call, more results are available. To retrieve the next page of results, make the call again using the returned token in NextToken.
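The paginators-1.json entry added above wires DescribeHomeRegionControls to NextToken and MaxResults. A hedged sketch of paging through the results manually with those fields, with names assumed from the shapes in api-2.json:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := migrationhubconfig.New(cfg)

	var next *string
	for {
		req := svc.DescribeHomeRegionControlsRequest(&migrationhubconfig.DescribeHomeRegionControlsInput{
			MaxResults: aws.Int64(50),
			NextToken:  next,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, hrc := range resp.HomeRegionControls {
			fmt.Printf("%+v\n", hrc)
		}
		next = resp.NextToken
		if next == nil {
			break // no more pages
		}
	}
}
```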

" + } + } + } +} diff --git a/models/apis/migrationhub-config/2019-06-30/examples-1.json b/models/apis/migrationhub-config/2019-06-30/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/migrationhub-config/2019-06-30/paginators-1.json b/models/apis/migrationhub-config/2019-06-30/paginators-1.json new file mode 100644 index 00000000000..cd89e068e4d --- /dev/null +++ b/models/apis/migrationhub-config/2019-06-30/paginators-1.json @@ -0,0 +1,9 @@ +{ + "pagination": { + "DescribeHomeRegionControls": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } + } +} diff --git a/models/apis/personalize/2018-05-22/api-2.json b/models/apis/personalize/2018-05-22/api-2.json index ec99908f0ba..59a15d623a0 100644 --- a/models/apis/personalize/2018-05-22/api-2.json +++ b/models/apis/personalize/2018-05-22/api-2.json @@ -13,6 +13,22 @@ "uid":"personalize-2018-05-22" }, "operations":{ + "CreateBatchInferenceJob":{ + "name":"CreateBatchInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateBatchInferenceJobRequest"}, + "output":{"shape":"CreateBatchInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceInUseException"} + ] + }, "CreateCampaign":{ "name":"CreateCampaign", "http":{ @@ -237,6 +253,20 @@ ], "idempotent":true }, + "DescribeBatchInferenceJob":{ + "name":"DescribeBatchInferenceJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeBatchInferenceJobRequest"}, + "output":{"shape":"DescribeBatchInferenceJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "idempotent":true + }, "DescribeCampaign":{ "name":"DescribeCampaign", "http":{ @@ -391,6 +421,20 @@ {"shape":"ResourceInUseException"} ] }, + "ListBatchInferenceJobs":{ + "name":"ListBatchInferenceJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListBatchInferenceJobsRequest"}, + "output":{"shape":"ListBatchInferenceJobsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InvalidNextTokenException"} + ], + "idempotent":true + }, "ListCampaigns":{ "name":"ListCampaigns", "http":{ @@ -586,6 +630,52 @@ "type":"string", "max":10000 }, + "BatchInferenceJob":{ + "type":"structure", + "members":{ + "jobName":{"shape":"Name"}, + "batchInferenceJobArn":{"shape":"Arn"}, + "failureReason":{"shape":"FailureReason"}, + "solutionVersionArn":{"shape":"Arn"}, + "numResults":{"shape":"NumBatchResults"}, + "jobInput":{"shape":"BatchInferenceJobInput"}, + "jobOutput":{"shape":"BatchInferenceJobOutput"}, + "roleArn":{"shape":"RoleArn"}, + "status":{"shape":"Status"}, + "creationDateTime":{"shape":"Date"}, + "lastUpdatedDateTime":{"shape":"Date"} + } + }, + "BatchInferenceJobInput":{ + "type":"structure", + "required":["s3DataSource"], + "members":{ + "s3DataSource":{"shape":"S3DataConfig"} + } + }, + "BatchInferenceJobOutput":{ + "type":"structure", + "required":["s3DataDestination"], + "members":{ + "s3DataDestination":{"shape":"S3DataConfig"} + } + }, + "BatchInferenceJobSummary":{ + "type":"structure", + "members":{ + "batchInferenceJobArn":{"shape":"Arn"}, + "jobName":{"shape":"Name"}, + 
"status":{"shape":"Status"}, + "creationDateTime":{"shape":"Date"}, + "lastUpdatedDateTime":{"shape":"Date"}, + "failureReason":{"shape":"FailureReason"} + } + }, + "BatchInferenceJobs":{ + "type":"list", + "member":{"shape":"BatchInferenceJobSummary"}, + "max":100 + }, "Boolean":{"type":"boolean"}, "Campaign":{ "type":"structure", @@ -670,6 +760,30 @@ "type":"double", "min":-1000000 }, + "CreateBatchInferenceJobRequest":{ + "type":"structure", + "required":[ + "jobName", + "solutionVersionArn", + "jobInput", + "jobOutput", + "roleArn" + ], + "members":{ + "jobName":{"shape":"Name"}, + "solutionVersionArn":{"shape":"Arn"}, + "numResults":{"shape":"NumBatchResults"}, + "jobInput":{"shape":"BatchInferenceJobInput"}, + "jobOutput":{"shape":"BatchInferenceJobOutput"}, + "roleArn":{"shape":"RoleArn"} + } + }, + "CreateBatchInferenceJobResponse":{ + "type":"structure", + "members":{ + "batchInferenceJobArn":{"shape":"Arn"} + } + }, "CreateCampaignRequest":{ "type":"structure", "required":[ @@ -1039,6 +1153,19 @@ "algorithm":{"shape":"Algorithm"} } }, + "DescribeBatchInferenceJobRequest":{ + "type":"structure", + "required":["batchInferenceJobArn"], + "members":{ + "batchInferenceJobArn":{"shape":"Arn"} + } + }, + "DescribeBatchInferenceJobResponse":{ + "type":"structure", + "members":{ + "batchInferenceJob":{"shape":"BatchInferenceJob"} + } + }, "DescribeCampaignRequest":{ "type":"structure", "required":["campaignArn"], @@ -1337,6 +1464,21 @@ }, "exception":true }, + "ListBatchInferenceJobsRequest":{ + "type":"structure", + "members":{ + "solutionVersionArn":{"shape":"Arn"}, + "nextToken":{"shape":"NextToken"}, + "maxResults":{"shape":"MaxResults"} + } + }, + "ListBatchInferenceJobsResponse":{ + "type":"structure", + "members":{ + "batchInferenceJobs":{"shape":"BatchInferenceJobs"}, + "nextToken":{"shape":"NextToken"} + } + }, "ListCampaignsRequest":{ "type":"structure", "members":{ @@ -1500,6 +1642,7 @@ "type":"string", "max":1300 }, + "NumBatchResults":{"type":"integer"}, "ParameterName":{ "type":"string", "max":256 @@ -1579,6 +1722,14 @@ "max":256, "pattern":"arn:([a-z\\d-]+):iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" }, + "S3DataConfig":{ + "type":"structure", + "required":["path"], + "members":{ + "path":{"shape":"S3Location"}, + "kmsKeyArn":{"shape":"KmsKeyArn"} + } + }, "S3Location":{ "type":"string", "max":256 diff --git a/models/apis/personalize/2018-05-22/docs-2.json b/models/apis/personalize/2018-05-22/docs-2.json index 7e6ba248d29..2a36836043a 100644 --- a/models/apis/personalize/2018-05-22/docs-2.json +++ b/models/apis/personalize/2018-05-22/docs-2.json @@ -2,6 +2,7 @@ "version": "2.0", "service": "

Amazon Personalize is a machine learning service that makes it easy to add individualized recommendations to customers.

", "operations": { + "CreateBatchInferenceJob": "

Creates a batch inference job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see recommendations-batch.
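A minimal sketch of the new CreateBatchInferenceJob call, assuming the generated input types mirror the shapes above (JobInput/JobOutput wrapping S3DataConfig) and the pre-GA request/Send pattern; all ARNs and S3 paths are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/personalize"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := personalize.New(cfg)

	req := svc.CreateBatchInferenceJobRequest(&personalize.CreateBatchInferenceJobInput{
		JobName:            aws.String("my-batch-job"),
		SolutionVersionArn: aws.String("arn:aws:personalize:us-west-2:111122223333:solution/my-solution/abc123"),
		RoleArn:            aws.String("arn:aws:iam::111122223333:role/PersonalizeBatchRole"),
		JobInput: &personalize.BatchInferenceJobInput{
			S3DataSource: &personalize.S3DataConfig{Path: aws.String("s3://my-bucket/batch-input.json")},
		},
		JobOutput: &personalize.BatchInferenceJobOutput{
			S3DataDestination: &personalize.S3DataConfig{Path: aws.String("s3://my-bucket/batch-output/")},
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.BatchInferenceJobArn != nil {
		fmt.Println("batch inference job ARN:", *resp.BatchInferenceJobArn)
	}
}
```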

", "CreateCampaign": "

Creates a campaign by deploying a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge. If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS, to maintain a 70% utilization. There's a short time delay while the capacity is increased that might cause loss of transactions. It's recommended to start with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", "CreateDataset": "

Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob to import your training data to a dataset.

There are three types of datasets:

Each dataset type has an associated schema with required field types. Only the Interactions dataset is required in order to train a model (also referred to as creating a solution).

A dataset can be in one of the following states:

To get the status of the dataset, call DescribeDataset.

Related APIs

", "CreateDatasetGroup": "

Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:

To train a model (create a solution), a dataset group that contains an Interactions dataset is required. Call CreateDataset to add a dataset to the group.

A dataset group can be in one of the following states:

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an AWS Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

", @@ -17,6 +18,7 @@ "DeleteSchema": "

Deletes a schema. Before deleting a schema, you must delete all datasets referencing the schema. For more information on schemas, see CreateSchema.

", "DeleteSolution": "

Deletes all versions of a solution and the Solution object itself. Before deleting a solution, you must delete all campaigns based on the solution. To determine what campaigns are using the solution, call ListCampaigns and supply the Amazon Resource Name (ARN) of the solution. You can't delete a solution if an associated SolutionVersion is in the CREATE PENDING or IN PROGRESS state. For more information on solutions, see CreateSolution.

", "DescribeAlgorithm": "

Describes the given algorithm.

", + "DescribeBatchInferenceJob": "

Gets the properties of a batch inference job including name, Amazon Resource Name (ARN), status, input and output configurations, and the ARN of the solution version used to generate the recommendations.

", "DescribeCampaign": "

Describes the given campaign, including its status.

A campaign can be in one of the following states:

When the status is CREATE FAILED, the response includes the failureReason key, which describes why.

For more information on campaigns, see CreateCampaign.

", "DescribeDataset": "

Describes the given dataset. For more information on datasets, see CreateDataset.

", "DescribeDatasetGroup": "

Describes the given dataset group. For more information on dataset groups, see CreateDatasetGroup.

", @@ -28,6 +30,7 @@ "DescribeSolution": "

Describes a solution. For more information on solutions, see CreateSolution.

", "DescribeSolutionVersion": "

Describes a specific version of a solution. For more information on solutions, see CreateSolution.

", "GetSolutionMetrics": "

Gets the metrics for the specified solution version.

", + "ListBatchInferenceJobs": "

Gets a list of the batch inference jobs that have been created from a solution version.
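The operation is paginated through the maxResults/nextToken pair (a paginator definition is also added to paginators-1.json further down in this change), so a plain token loop is one way to walk every page. A sketch, with field names assumed from the request and response shapes and the same setup as the earlier sketches:

    // List every batch inference job created from one solution version, page by page.
    var token *string
    for {
        req := svc.ListBatchInferenceJobsRequest(&personalize.ListBatchInferenceJobsInput{
            SolutionVersionArn: aws.String("arn:aws:personalize:...:solution-version"), // placeholder
            MaxResults:         aws.Int64(100),
            NextToken:          token,
        })
        page, err := req.Send(context.TODO())
        if err != nil {
            panic(err)
        }
        for _, job := range page.BatchInferenceJobs {
            _ = job // e.g. inspect job.Status or job.BatchInferenceJobArn
        }
        if page.NextToken == nil {
            break // no more pages
        }
        token = page.NextToken
    }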

", "ListCampaigns": "

Returns a list of campaigns that use the given solution. When a solution is not specified, all the campaigns associated with the account are listed. The response provides the properties for each campaign, including the Amazon Resource Name (ARN). For more information on campaigns, see CreateCampaign.

", "ListDatasetGroups": "

Returns a list of dataset groups. The response provides the properties for each dataset group, including the Amazon Resource Name (ARN). For more information on dataset groups, see CreateDatasetGroup.

", "ListDatasetImportJobs": "

Returns a list of dataset import jobs that use the given dataset. When a dataset is not specified, all the dataset import jobs associated with the account are listed. The response provides the properties for each dataset import job, including the Amazon Resource Name (ARN). For more information on dataset import jobs, see CreateDatasetImportJob. For more information on datasets, see CreateDataset.

", @@ -65,10 +68,15 @@ "Algorithm$roleArn": "

The Amazon Resource Name (ARN) of the role.

", "ArnList$member": null, "AutoMLResult$bestRecipeArn": "

The Amazon Resource Name (ARN) of the best recipe.

", + "BatchInferenceJob$batchInferenceJobArn": "

The Amazon Resource Name (ARN) of the batch inference job.

", + "BatchInferenceJob$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version from which the batch inference job was created.

", + "BatchInferenceJobSummary$batchInferenceJobArn": "

The Amazon Resource Name (ARN) of the batch inference job.

", "Campaign$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", "Campaign$solutionVersionArn": "

The Amazon Resource Name (ARN) of a specific version of the solution.

", "CampaignSummary$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", "CampaignUpdateSummary$solutionVersionArn": "

The Amazon Resource Name (ARN) of the deployed solution version.

", + "CreateBatchInferenceJobRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version that will be used to generate the batch inference recommendations.

", + "CreateBatchInferenceJobResponse$batchInferenceJobArn": "

The ARN of the batch inference job.

", "CreateCampaignRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version to deploy.

", "CreateCampaignResponse$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", "CreateDatasetGroupResponse$datasetGroupArn": "

The Amazon Resource Name (ARN) of the new dataset group.

", @@ -104,6 +112,7 @@ "DeleteSchemaRequest$schemaArn": "

The Amazon Resource Name (ARN) of the schema to delete.

", "DeleteSolutionRequest$solutionArn": "

The ARN of the solution to delete.

", "DescribeAlgorithmRequest$algorithmArn": "

The Amazon Resource Name (ARN) of the algorithm to describe.

", + "DescribeBatchInferenceJobRequest$batchInferenceJobArn": "

The ARN of the batch inference job to describe.

", "DescribeCampaignRequest$campaignArn": "

The Amazon Resource Name (ARN) of the campaign.

", "DescribeDatasetGroupRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group to describe.

", "DescribeDatasetImportJobRequest$datasetImportJobArn": "

The Amazon Resource Name (ARN) of the dataset import job to describe.

", @@ -120,6 +129,7 @@ "FeatureTransformation$featureTransformationArn": "

The Amazon Resource Name (ARN) of the FeatureTransformation object.

", "GetSolutionMetricsRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version for which to get metrics.

", "GetSolutionMetricsResponse$solutionVersionArn": "

The same solution version ARN as specified in the request.

", + "ListBatchInferenceJobsRequest$solutionVersionArn": "

The Amazon Resource Name (ARN) of the solution version from which the batch inference jobs were created.

", "ListCampaignsRequest$solutionArn": "

The Amazon Resource Name (ARN) of the solution to list the campaigns for. When a solution is not specified, all the campaigns associated with the account are listed.

", "ListDatasetImportJobsRequest$datasetArn": "

The Amazon Resource Name (ARN) of the dataset to list the dataset import jobs for.

", "ListDatasetsRequest$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group that contains the datasets to list.

", @@ -169,6 +179,38 @@ "DatasetSchema$schema": "

The schema.

" } }, + "BatchInferenceJob": { + "base": "

Contains information on a batch inference job.

", + "refs": { + "DescribeBatchInferenceJobResponse$batchInferenceJob": "

Information on the specified batch inference job.

" + } + }, + "BatchInferenceJobInput": { + "base": "

The input configuration of a batch inference job.

", + "refs": { + "BatchInferenceJob$jobInput": "

The Amazon S3 path that leads to the input data used to generate the batch inference job.

", + "CreateBatchInferenceJobRequest$jobInput": "

The Amazon S3 path that leads to the input file to base your recommendations on. The input material must be in JSON format.

" + } + }, + "BatchInferenceJobOutput": { + "base": "

The output configuration parameters of a batch inference job.

", + "refs": { + "BatchInferenceJob$jobOutput": "

The Amazon S3 bucket that contains the output data generated by the batch inference job.

", + "CreateBatchInferenceJobRequest$jobOutput": "

The path to the Amazon S3 bucket where the job's output will be stored.

" + } + }, + "BatchInferenceJobSummary": { + "base": "

A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs operation returns a list of batch inference job summaries.

", + "refs": { + "BatchInferenceJobs$member": null + } + }, + "BatchInferenceJobs": { + "base": null, + "refs": { + "ListBatchInferenceJobsResponse$batchInferenceJobs": "

A list containing information on each job that is returned.

" + } + }, "Boolean": { "base": null, "refs": { @@ -250,6 +292,16 @@ "DefaultContinuousHyperParameterRange$minValue": "

The minimum allowable value for the hyperparameter.

" } }, + "CreateBatchInferenceJobRequest": { + "base": null, + "refs": { + } + }, + "CreateBatchInferenceJobResponse": { + "base": null, + "refs": { + } + }, "CreateCampaignRequest": { "base": null, "refs": { @@ -416,6 +468,10 @@ "refs": { "Algorithm$creationDateTime": "

The date and time (in Unix time) that the algorithm was created.

", "Algorithm$lastUpdatedDateTime": "

The date and time (in Unix time) that the algorithm was last updated.

", + "BatchInferenceJob$creationDateTime": "

The time at which the batch inference job was created.

", + "BatchInferenceJob$lastUpdatedDateTime": "

The time at which the batch inference job was last updated.

", + "BatchInferenceJobSummary$creationDateTime": "

The time at which the batch inference job was created.

", + "BatchInferenceJobSummary$lastUpdatedDateTime": "

The time at which the batch inference job was last updated.

", "Campaign$creationDateTime": "

The date and time (in Unix format) that the campaign was created.

", "Campaign$lastUpdatedDateTime": "

The date and time (in Unix format) that the campaign was last updated.

", "CampaignSummary$creationDateTime": "

The date and time (in Unix time) that the campaign was created.

", @@ -540,6 +596,16 @@ "refs": { } }, + "DescribeBatchInferenceJobRequest": { + "base": null, + "refs": { + } + }, + "DescribeBatchInferenceJobResponse": { + "base": null, + "refs": { + } + }, "DescribeCampaignRequest": { "base": null, "refs": { @@ -698,6 +764,8 @@ "FailureReason": { "base": null, "refs": { + "BatchInferenceJob$failureReason": "

If the batch inference job failed, the reason for the failure.

", + "BatchInferenceJobSummary$failureReason": "

If the batch inference job failed, the reason for the failure.

", "Campaign$failureReason": "

If a campaign fails, the reason behind the failure.

", "CampaignSummary$failureReason": "

If a campaign fails, the reason behind the failure.

", "CampaignUpdateSummary$failureReason": "

If a campaign update fails, the reason behind the failure.

", @@ -740,7 +808,7 @@ "HPOConfig": { "base": "

Describes the properties for hyperparameter optimization (HPO). For use with the bring-your-own-recipe feature. Do not use for Amazon Personalize native recipes.

", "refs": { - "SolutionConfig$hpoConfig": "

Describes the properties for hyperparameter optimization (HPO). For use with the bring-your-own-recipe feature. Not used with Amazon Personalize predefined recipes.

" + "SolutionConfig$hpoConfig": "

Describes the properties for hyperparameter optimization (HPO).

" } }, "HPOObjective": { @@ -821,7 +889,8 @@ "base": null, "refs": { "CreateDatasetGroupRequest$kmsKeyArn": "

The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets.

", - "DatasetGroup$kmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.

" + "DatasetGroup$kmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.

", + "S3DataConfig$kmsKeyArn": "

The Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.

" } }, "LimitExceededException": { @@ -829,6 +898,16 @@ "refs": { } }, + "ListBatchInferenceJobsRequest": { + "base": null, + "refs": { + } + }, + "ListBatchInferenceJobsResponse": { + "base": null, + "refs": { + } + }, "ListCampaignsRequest": { "base": null, "refs": { @@ -922,6 +1001,7 @@ "MaxResults": { "base": null, "refs": { + "ListBatchInferenceJobsRequest$maxResults": "

The maximum number of batch inference job results to return in each page. The default value is 100.

", "ListCampaignsRequest$maxResults": "

The maximum number of campaigns to return.

", "ListDatasetGroupsRequest$maxResults": "

The maximum number of dataset groups to return.

", "ListDatasetImportJobsRequest$maxResults": "

The maximum number of dataset import jobs to return.

", @@ -964,8 +1044,11 @@ "refs": { "Algorithm$name": "

The name of the algorithm.

", "AlgorithmImage$name": "

The name of the algorithm image.

", + "BatchInferenceJob$jobName": "

The name of the batch inference job.

", + "BatchInferenceJobSummary$jobName": "

The name of the batch inference job.

", "Campaign$name": "

The name of the campaign.

", "CampaignSummary$name": "

The name of the campaign.

", + "CreateBatchInferenceJobRequest$jobName": "

The name of the batch inference job to create.

", "CreateCampaignRequest$name": "

A name for the new campaign. The campaign name must be unique within your account.

", "CreateDatasetGroupRequest$name": "

The name for the new dataset group.

", "CreateDatasetImportJobRequest$jobName": "

The name for the dataset import job.

", @@ -993,6 +1076,8 @@ "NextToken": { "base": null, "refs": { + "ListBatchInferenceJobsRequest$nextToken": "

The token to request the next page of results.

", + "ListBatchInferenceJobsResponse$nextToken": "

The token to use to retrieve the next page of results. The value is null when there are no more results to return.

", "ListCampaignsRequest$nextToken": "

A token returned from the previous call to ListCampaigns for getting the next set of campaigns (if they exist).

", "ListCampaignsResponse$nextToken": "

A token for getting the next set of campaigns (if they exist).

", "ListDatasetGroupsRequest$nextToken": "

A token returned from the previous call to ListDatasetGroups for getting the next set of dataset groups (if they exist).

", @@ -1013,6 +1098,13 @@ "ListSolutionsResponse$nextToken": "

A token for getting the next set of solutions (if they exist).

" } }, + "NumBatchResults": { + "base": null, + "refs": { + "BatchInferenceJob$numResults": "

The number of recommendations generated by the batch inference job. This number includes the error messages generated for failed input records.

", + "CreateBatchInferenceJobRequest$numResults": "

The number of recommendations to retrieve.

" + } + }, "ParameterName": { "base": null, "refs": { @@ -1106,15 +1198,25 @@ "RoleArn": { "base": null, "refs": { + "BatchInferenceJob$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that requested the batch inference job.

", + "CreateBatchInferenceJobRequest$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read and write to your input and output Amazon S3 buckets, respectively.

", "CreateDatasetGroupRequest$roleArn": "

The ARN of the IAM role that has permissions to access the KMS key. Supplying an IAM role is only valid when also specifying a KMS key.

", "CreateDatasetImportJobRequest$roleArn": "

The ARN of the IAM role that has permissions to read from the Amazon S3 data source.

", "DatasetGroup$roleArn": "

The ARN of the IAM role that has permissions to create the dataset group.

" } }, + "S3DataConfig": { + "base": "

The configuration details of an Amazon S3 input or output bucket.

", + "refs": { + "BatchInferenceJobInput$s3DataSource": "

The URI of the Amazon S3 location that contains your input data. The Amazon S3 bucket must be in the same region as the API endpoint you are calling.

", + "BatchInferenceJobOutput$s3DataDestination": "

Information on the Amazon S3 bucket in which the batch inference job's output is stored.

" + } + }, "S3Location": { "base": null, "refs": { - "DataSource$dataLocation": "

The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:

s3://bucket-name/training-data.csv

" + "DataSource$dataLocation": "

The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:

s3://bucket-name/training-data.csv

", + "S3DataConfig$path": "

The file path of the Amazon S3 bucket.

" } }, "Schemas": { @@ -1171,6 +1273,8 @@ "Status": { "base": null, "refs": { + "BatchInferenceJob$status": "

The status of the batch inference job. The status is one of the following values:

", + "BatchInferenceJobSummary$status": "

The status of the batch inference job. The status is one of the following values:

", "Campaign$status": "

The status of the campaign.

A campaign can be in one of the following states:

", "CampaignSummary$status": "

The status of the campaign.

A campaign can be in one of the following states:

", "CampaignUpdateSummary$status": "

The status of the campaign update.

A campaign update can be in one of the following states:

", diff --git a/models/apis/personalize/2018-05-22/paginators-1.json b/models/apis/personalize/2018-05-22/paginators-1.json index 69cd212a061..78f6ecfa8d2 100644 --- a/models/apis/personalize/2018-05-22/paginators-1.json +++ b/models/apis/personalize/2018-05-22/paginators-1.json @@ -1,5 +1,11 @@ { "pagination": { + "ListBatchInferenceJobs": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "batchInferenceJobs" + }, "ListCampaigns": { "input_token": "nextToken", "limit_key": "maxResults", diff --git a/models/apis/pinpoint/2016-12-01/api-2.json b/models/apis/pinpoint/2016-12-01/api-2.json index 009c8bd55e6..78536558c79 100644 --- a/models/apis/pinpoint/2016-12-01/api-2.json +++ b/models/apis/pinpoint/2016-12-01/api-2.json @@ -308,6 +308,37 @@ } ] }, + "CreateVoiceTemplate": { + "name": "CreateVoiceTemplate", + "http": { + "method": "POST", + "requestUri": "/v1/templates/{template-name}/voice", + "responseCode": 201 + }, + "input": { + "shape": "CreateVoiceTemplateRequest" + }, + "output": { + "shape": "CreateVoiceTemplateResponse" + }, + "errors": [ + { + "shape": "MethodNotAllowedException" + }, + { + "shape": "TooManyRequestsException" + }, + { + "shape": "BadRequestException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + } + ] + }, "DeleteAdmChannel": { "name": "DeleteAdmChannel", "http": { @@ -988,6 +1019,40 @@ } ] }, + "DeleteVoiceTemplate": { + "name": "DeleteVoiceTemplate", + "http": { + "method": "DELETE", + "requestUri": "/v1/templates/{template-name}/voice", + "responseCode": 202 + }, + "input": { + "shape": "DeleteVoiceTemplateRequest" + }, + "output": { + "shape": "DeleteVoiceTemplateResponse" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "MethodNotAllowedException" + }, + { + "shape": "TooManyRequestsException" + } + ] + }, "GetAdmChannel": { "name": "GetAdmChannel", "http": { @@ -2382,6 +2447,40 @@ } ] }, + "GetVoiceTemplate": { + "name": "GetVoiceTemplate", + "http": { + "method": "GET", + "requestUri": "/v1/templates/{template-name}/voice", + "responseCode": 200 + }, + "input": { + "shape": "GetVoiceTemplateRequest" + }, + "output": { + "shape": "GetVoiceTemplateResponse" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "MethodNotAllowedException" + }, + { + "shape": "TooManyRequestsException" + } + ] + }, "ListJourneys": { "name": "ListJourneys", "http": { @@ -3375,6 +3474,40 @@ "shape": "TooManyRequestsException" } ] + }, + "UpdateVoiceTemplate": { + "name": "UpdateVoiceTemplate", + "http": { + "method": "PUT", + "requestUri": "/v1/templates/{template-name}/voice", + "responseCode": 202 + }, + "input": { + "shape": "UpdateVoiceTemplateRequest" + }, + "output": { + "shape": "UpdateVoiceTemplateResponse" + }, + "errors": [ + { + "shape": "BadRequestException" + }, + { + "shape": "InternalServerErrorException" + }, + { + "shape": "ForbiddenException" + }, + { + "shape": "NotFoundException" + }, + { + "shape": "MethodNotAllowedException" + }, + { + "shape": "TooManyRequestsException" + } + ] } }, "shapes": { @@ -3631,6 +3764,9 @@ "MediaUrl": { "shape": "__string" }, + "RawContent": { + "shape": "__string" + }, "Sound": { "shape": "__string" }, @@ -3996,6 
+4132,9 @@ "ImageUrl": { "shape": "__string" }, + "RawContent": { + "shape": "__string" + }, "SmallImageIconUrl": { "shape": "__string" }, @@ -4880,6 +5019,36 @@ } } }, + "CreateVoiceTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name" + }, + "VoiceTemplateRequest": { + "shape": "VoiceTemplateRequest" + } + }, + "required": [ + "TemplateName", + "VoiceTemplateRequest" + ], + "payload": "VoiceTemplateRequest" + }, + "CreateVoiceTemplateResponse": { + "type": "structure", + "members": { + "CreateTemplateMessageBody": { + "shape": "CreateTemplateMessageBody" + } + }, + "required": [ + "CreateTemplateMessageBody" + ], + "payload": "CreateTemplateMessageBody" + }, "DefaultMessage": { "type": "structure", "members": { @@ -5467,6 +5636,31 @@ ], "payload": "VoiceChannelResponse" }, + "DeleteVoiceTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name" + } + }, + "required": [ + "TemplateName" + ] + }, + "DeleteVoiceTemplateResponse": { + "type": "structure", + "members": { + "MessageBody": { + "shape": "MessageBody" + } + }, + "required": [ + "MessageBody" + ], + "payload": "MessageBody" + }, "DeliveryStatus": { "type": "string", "enum": [ @@ -5647,6 +5841,9 @@ "EmailTemplateRequest": { "type": "structure", "members": { + "DefaultSubstitutions": { + "shape": "__string" + }, "HtmlPart": { "shape": "__string" }, @@ -5657,6 +5854,9 @@ "shape": "MapOf__string", "locationName": "tags" }, + "TemplateDescription": { + "shape": "__string" + }, "TextPart": { "shape": "__string" } @@ -5671,6 +5871,9 @@ "CreationDate": { "shape": "__string" }, + "DefaultSubstitutions": { + "shape": "__string" + }, "HtmlPart": { "shape": "__string" }, @@ -5684,6 +5887,9 @@ "shape": "MapOf__string", "locationName": "tags" }, + "TemplateDescription": { + "shape": "__string" + }, "TemplateName": { "shape": "__string" }, @@ -7743,6 +7949,31 @@ ], "payload": "VoiceChannelResponse" }, + "GetVoiceTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name" + } + }, + "required": [ + "TemplateName" + ] + }, + "GetVoiceTemplateResponse": { + "type": "structure", + "members": { + "VoiceTemplateResponse": { + "shape": "VoiceTemplateResponse" + } + }, + "required": [ + "VoiceTemplateResponse" + ], + "payload": "VoiceTemplateResponse" + }, "HoldoutActivity": { "type": "structure", "members": { @@ -8612,12 +8843,18 @@ "Default": { "shape": "DefaultPushNotificationTemplate" }, + "DefaultSubstitutions": { + "shape": "__string" + }, "GCM": { "shape": "AndroidPushNotificationTemplate" }, "tags": { "shape": "MapOf__string", "locationName": "tags" + }, + "TemplateDescription": { + "shape": "__string" } } }, @@ -8642,6 +8879,9 @@ "Default": { "shape": "DefaultPushNotificationTemplate" }, + "DefaultSubstitutions": { + "shape": "__string" + }, "GCM": { "shape": "AndroidPushNotificationTemplate" }, @@ -8652,6 +8892,9 @@ "shape": "MapOf__string", "locationName": "tags" }, + "TemplateDescription": { + "shape": "__string" + }, "TemplateName": { "shape": "__string" }, @@ -8952,9 +9195,15 @@ "Body": { "shape": "__string" }, + "DefaultSubstitutions": { + "shape": "__string" + }, "tags": { "shape": "MapOf__string", "locationName": "tags" + }, + "TemplateDescription": { + "shape": "__string" } } }, @@ -8970,6 +9219,9 @@ "CreationDate": { "shape": "__string" }, + 
"DefaultSubstitutions": { + "shape": "__string" + }, "LastModifiedDate": { "shape": "__string" }, @@ -8977,6 +9229,9 @@ "shape": "MapOf__string", "locationName": "tags" }, + "TemplateDescription": { + "shape": "__string" + }, "TemplateName": { "shape": "__string" }, @@ -9492,6 +9747,9 @@ }, "SMSTemplate": { "shape": "Template" + }, + "VoiceTemplate": { + "shape": "Template" } } }, @@ -9504,6 +9762,9 @@ "CreationDate": { "shape": "__string" }, + "DefaultSubstitutions": { + "shape": "__string" + }, "LastModifiedDate": { "shape": "__string" }, @@ -9511,6 +9772,9 @@ "shape": "MapOf__string", "locationName": "tags" }, + "TemplateDescription": { + "shape": "__string" + }, "TemplateName": { "shape": "__string" }, @@ -10261,6 +10525,36 @@ ], "payload": "VoiceChannelResponse" }, + "UpdateVoiceTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "shape": "__string", + "location": "uri", + "locationName": "template-name" + }, + "VoiceTemplateRequest": { + "shape": "VoiceTemplateRequest" + } + }, + "required": [ + "TemplateName", + "VoiceTemplateRequest" + ], + "payload": "VoiceTemplateRequest" + }, + "UpdateVoiceTemplateResponse": { + "type": "structure", + "members": { + "MessageBody": { + "shape": "MessageBody" + } + }, + "required": [ + "MessageBody" + ], + "payload": "MessageBody" + }, "VoiceChannelRequest": { "type": "structure", "members": { @@ -10327,6 +10621,75 @@ } } }, + "VoiceTemplateRequest": { + "type": "structure", + "members": { + "Body": { + "shape": "__string" + }, + "DefaultSubstitutions": { + "shape": "__string" + }, + "LanguageCode": { + "shape": "__string" + }, + "tags": { + "shape": "MapOf__string", + "locationName": "tags" + }, + "TemplateDescription": { + "shape": "__string" + }, + "VoiceId": { + "shape": "__string" + } + } + }, + "VoiceTemplateResponse": { + "type": "structure", + "members": { + "Arn": { + "shape": "__string" + }, + "Body": { + "shape": "__string" + }, + "CreationDate": { + "shape": "__string" + }, + "DefaultSubstitutions": { + "shape": "__string" + }, + "LanguageCode": { + "shape": "__string" + }, + "LastModifiedDate": { + "shape": "__string" + }, + "tags": { + "shape": "MapOf__string", + "locationName": "tags" + }, + "TemplateDescription": { + "shape": "__string" + }, + "TemplateName": { + "shape": "__string" + }, + "TemplateType": { + "shape": "TemplateType" + }, + "VoiceId": { + "shape": "__string" + } + }, + "required": [ + "LastModifiedDate", + "CreationDate", + "TemplateName", + "TemplateType" + ] + }, "WaitActivity": { "type": "structure", "members": { diff --git a/models/apis/pinpoint/2016-12-01/docs-2.json b/models/apis/pinpoint/2016-12-01/docs-2.json index fc848d862c2..d1a0ee431d1 100644 --- a/models/apis/pinpoint/2016-12-01/docs-2.json +++ b/models/apis/pinpoint/2016-12-01/docs-2.json @@ -11,6 +11,7 @@ "CreatePushTemplate" : "

Creates a message template that you can use in messages that are sent through a push notification channel.

", "CreateSegment" : "

Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application.

", "CreateSmsTemplate" : "

Creates a message template that you can use in messages that are sent through the SMS channel.

", + "CreateVoiceTemplate" : "

Creates a message template that you can use in messages that are sent through the voice channel.
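As with the Personalize sketches earlier in this change, a rough Go illustration of creating a voice template; the client constructor follows the New(cfg) pattern used in this repository, the field names are assumed from the CreateVoiceTemplateRequest and VoiceTemplateRequest shapes added in api-2.json above, and the template name, body, language, and voice are placeholders:

    // Sketch: register a reusable voice-channel message template.
    svc := pinpoint.New(cfg) // cfg loaded as in the earlier sketches
    req := svc.CreateVoiceTemplateRequest(&pinpoint.CreateVoiceTemplateInput{
        TemplateName: aws.String("example-voice-template"), // placeholder
        VoiceTemplateRequest: &pinpoint.VoiceTemplateRequest{
            Body:         aws.String("Hello, your order has shipped."), // spoken message body
            LanguageCode: aws.String("en-US"),                          // assumed value
            VoiceId:      aws.String("Joanna"),                         // assumed voice name
        },
    })
    if _, err := req.Send(context.TODO()); err != nil {
        panic(err)
    }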

", "DeleteAdmChannel" : "

Disables the ADM channel for an application and deletes any existing settings for the channel.

", "DeleteApnsChannel" : "

Disables the APNs channel for an application and deletes any existing settings for the channel.

", "DeleteApnsSandboxChannel" : "

Disables the APNs sandbox channel for an application and deletes any existing settings for the channel.

", @@ -31,6 +32,7 @@ "DeleteSmsTemplate" : "

Deletes a message template that was designed for use in messages that were sent through the SMS channel.

", "DeleteUserEndpoints" : "

Deletes all the endpoints that are associated with a specific user ID.

", "DeleteVoiceChannel" : "

Disables the voice channel for an application and deletes any existing settings for the channel.

", + "DeleteVoiceTemplate" : "

Deletes a message template that was designed for use in messages that were sent through the voice channel.

", "GetAdmChannel" : "

Retrieves information about the status and settings of the ADM channel for an application.

", "GetApnsChannel" : "

Retrieves information about the status and settings of the APNs channel for an application.

", "GetApnsSandboxChannel" : "

Retrieves information about the status and settings of the APNs sandbox channel for an application.

", @@ -72,6 +74,7 @@ "GetSmsTemplate" : "

Retrieves the content and settings for a message template that you can use in messages that are sent through the SMS channel.

", "GetUserEndpoints" : "

Retrieves information about all the endpoints that are associated with a specific user ID.

", "GetVoiceChannel" : "

Retrieves information about the status and settings of the voice channel for an application.

", + "GetVoiceTemplate" : "

Retrieves the content and settings for a message template that you can use in messages that are sent through the voice channel.

", "ListJourneys" : "

Retrieves information about the status, configuration, and other settings for all the journeys that are associated with an application.

", "ListTagsForResource" : "

Retrieves all the tags (keys and values) that are associated with an application, campaign, journey, message template, or segment.

", "ListTemplates" : "

Retrieves information about all the message templates that are associated with your Amazon Pinpoint account.

", @@ -102,7 +105,8 @@ "UpdateSegment" : "

Creates a new segment for an application or updates the configuration, dimension, and other settings for an existing segment that's associated with an application.

", "UpdateSmsChannel" : "

Enables the SMS channel for an application or updates the status and settings of the SMS channel for an application.

", "UpdateSmsTemplate" : "

Updates an existing message template that you can use in messages that are sent through the SMS channel.

", - "UpdateVoiceChannel" : "

Enables the voice channel for an application or updates the status and settings of the voice channel for an application.

" + "UpdateVoiceChannel" : "

Enables the voice channel for an application or updates the status and settings of the voice channel for an application.

", + "UpdateVoiceTemplate" : "

Updates an existing message template that you can use in messages that are sent through the voice channel.

" }, "shapes" : { "ADMChannelRequest" : { @@ -376,9 +380,9 @@ "refs" : { } }, "DefaultMessage" : { - "base" : "

Specifies the default message to use for all channels.

", + "base" : "

Specifies the default message for all channels.

", "refs" : { - "DirectMessageConfiguration$DefaultMessage" : "

The default message body for all channels.

" + "DirectMessageConfiguration$DefaultMessage" : "

The default message for all channels.

" } }, "DefaultPushNotificationMessage" : { @@ -410,8 +414,8 @@ "DirectMessageConfiguration" : { "base" : "

Specifies the settings and content for the default message and any default messages that you tailored for specific channels.

", "refs" : { - "MessageRequest$MessageConfiguration" : "

The set of properties that defines the configuration settings for the message.

", - "SendUsersMessageRequest$MessageConfiguration" : "

The message definitions for the default message and any default messages that you defined for specific channels.

" + "MessageRequest$MessageConfiguration" : "

The settings and content for the default message and any default messages that you defined for specific channels.

", + "SendUsersMessageRequest$MessageConfiguration" : "

The settings and content for the default message and any default messages that you defined for specific channels.

" } }, "Duration" : { @@ -749,7 +753,7 @@ } }, "MessageRequest" : { - "base" : "

Specifies the objects that define configuration and other settings for a message.

", + "base" : "

Specifies the configuration and other settings for a message.

", "refs" : { } }, "MessageResponse" : { @@ -1030,9 +1034,9 @@ "SimpleEmailPart" : { "base" : "

Specifies the subject or body of an email message, represented as textual email data and the applicable character set.

", "refs" : { - "SimpleEmail$HtmlPart" : "

The body of the email message, in HTML format. We recommend using an HTML part for email clients that support HTML. You can include links, formatted text, and more in an HTML message.

", + "SimpleEmail$HtmlPart" : "

The body of the email message, in HTML format. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message.

", "SimpleEmail$Subject" : "

The subject line, or title, of the email.

", - "SimpleEmail$TextPart" : "

The body of the email message, in text format. We recommend using a text part for email clients that don't support HTML and clients that are connected to high-latency networks, such as mobile devices.

" + "SimpleEmail$TextPart" : "

The body of the email message, in plain text format. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.

" } }, "SourceType" : { @@ -1065,11 +1069,12 @@ "refs" : { "TemplateConfiguration$EmailTemplate" : "

The email template to use for the message.

", "TemplateConfiguration$PushTemplate" : "

The push notification template to use for the message.

", - "TemplateConfiguration$SMSTemplate" : "

The SMS template to use for the message.

" + "TemplateConfiguration$SMSTemplate" : "

The SMS template to use for the message.

", + "TemplateConfiguration$VoiceTemplate" : "

The voice template to use for the message.

" } }, "TemplateConfiguration" : { - "base" : "

Specifies the message template for each type of channel.

", + "base" : "

Specifies the message template to use for the message, for each type of channel.

", "refs" : { "CampaignResponse$TemplateConfiguration" : "

The message template that’s used for the campaign.

", "MessageRequest$TemplateConfiguration" : "

The message template to use for the message.

", @@ -1091,7 +1096,8 @@ "EmailTemplateResponse$TemplateType" : "

The type of channel that the message template is designed for. For an email template, this value is EMAIL.

", "PushNotificationTemplateResponse$TemplateType" : "

The type of channel that the message template is designed for. For a push notification template, this value is PUSH.

", "SMSTemplateResponse$TemplateType" : "

The type of channel that the message template is designed for. For an SMS template, this value is SMS.

", - "TemplateResponse$TemplateType" : "

The type of channel that the message template is designed for.

" + "TemplateResponse$TemplateType" : "

The type of channel that the message template is designed for.

", + "VoiceTemplateResponse$TemplateType" : "

The type of channel that the message template is designed for. For a voice template, this value is VOICE.

" } }, "TemplatesResponse" : { @@ -1132,6 +1138,14 @@ "DirectMessageConfiguration$VoiceMessage" : "

The default message for the voice channel. This message overrides the default message (DefaultMessage).

" } }, + "VoiceTemplateRequest" : { + "base" : "

Specifies the content and settings for a message template that can be used in messages that are sent through the voice channel.

", + "refs" : { } + }, + "VoiceTemplateResponse" : { + "base" : "

Provides information about the content and settings for a message template that can be used in messages that are sent through the voice channel.

", + "refs" : { } + }, "WaitActivity" : { "base" : "

Specifies the settings for a wait activity in a journey. This type of activity waits for a certain amount of time or until a specific date and time before moving participants to the next activity in a journey.

", "refs" : { @@ -1551,9 +1565,9 @@ "refs" : { "ADMMessage$Substitutions" : "

The default message variables to use in the notification message. You can override the default variables with individual address variables.

", "APNSMessage$Substitutions" : "

The default message variables to use in the notification message. You can override these default variables with individual address variables.

", - "AddressConfiguration$Substitutions" : "

An object that maps variable values for the message. Amazon Pinpoint merges these values with the variable values specified by properties of the DefaultMessage object. The substitutions in this map take precedence over all other substitutions.

", + "AddressConfiguration$Substitutions" : "

A map of the message variables to merge with the variables specified by properties of the DefaultMessage object. The variables specified in this map take precedence over all other variables.

", "BaiduMessage$Substitutions" : "

The default message variables to use in the notification message. You can override the default variables with individual address variables.

", - "DefaultMessage$Substitutions" : "

The default message variables to use in the push notification, email, or SMS message. You can override these default variables with individual address variables.

", + "DefaultMessage$Substitutions" : "

The default message variables to use in the message. You can override these default variables with individual address variables.

", "DefaultPushNotificationMessage$Substitutions" : "

The default message variables to use in the notification message. You can override the default variables with individual address variables.

", "EmailMessage$Substitutions" : "

The default message variables to use in the email message. You can override the default variables with individual address variables.

", "EndpointBatchItem$Attributes" : "

One or more custom attributes that describe the endpoint by associating a name with an array of values. For example, the value of a custom attribute named Interests might be: [\"science\", \"music\", \"travel\"]. You can use these attributes as filter criteria when you create segments.

When you define the name of a custom attribute, avoid using the following characters: number sign (#), colon (:), question mark (?), backslash (\\), and slash (/). The Amazon Pinpoint console can't display attribute names that contain these characters. This limitation doesn't apply to attribute values.

", @@ -1601,6 +1615,8 @@ "SendUsersMessageRequest$Context" : "

A map of custom attribute-value pairs. For a push notification, Amazon Pinpoint adds these attributes to the data.pinpoint object in the body of the notification payload. Amazon Pinpoint also provides these attributes in the events that it generates for users-messages deliveries.

", "TagsModel$tags" : "

A string-to-string map of key-value pairs that defines the tags for an application, campaign, journey, message template, or segment. Each of these resources can have a maximum of 50 tags.

Each tag consists of a required tag key and an associated tag value. The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.

", "TemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", + "VoiceTemplateRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", + "VoiceTemplateResponse$tags" : "

A string-to-string map of key-value pairs that identifies the tags that are associated with the message template. Each tag consists of a required tag key and an associated tag value.

", "WriteCampaignRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the campaign. Each tag consists of a required tag key and an associated tag value.

", "WriteSegmentRequest$tags" : "

A string-to-string map of key-value pairs that defines the tags to associate with the segment. Each tag consists of a required tag key and an associated tag value.

" } @@ -1623,7 +1639,7 @@ "ADMMessage$ImageIconUrl" : "

The URL of the large icon image to display in the content view of the push notification.

", "ADMMessage$ImageUrl" : "

The URL of an image to display in the push notification.

", "ADMMessage$MD5" : "

The base64-encoded, MD5 checksum of the value specified by the Data property. ADM uses the MD5 value to verify the integrity of the data.

", - "ADMMessage$RawContent" : "

The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.

", + "ADMMessage$RawContent" : "

The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.

", "ADMMessage$SmallImageIconUrl" : "

The URL of the small icon image to display in the status bar and the content view of the push notification.

", "ADMMessage$Sound" : "

The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

", "ADMMessage$Title" : "

The title to display above the notification message on the recipient's device.

", @@ -1649,13 +1665,14 @@ "APNSMessage$MediaUrl" : "

The URL of an image or video to display in the push notification.

", "APNSMessage$PreferredAuthenticationMethod" : "

The authentication method that you want Amazon Pinpoint to use when authenticating with APNs, CERTIFICATE or TOKEN.

", "APNSMessage$Priority" : "

  • 5 - Low priority, the notification might be delayed, delivered as part of a group, or throttled.

  • 10 - High priority, the notification is sent immediately. This is the default value. A high priority notification should trigger an alert, play a sound, or badge your app's icon on the recipient's device.

    Amazon Pinpoint specifies this value in the apns-priority request header when it sends the notification message to APNs.

    The equivalent values for Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), are normal, for 5, and high, for 10. If you specify an FCM value for this property, Amazon Pinpoint accepts and converts the value to the corresponding APNs value.

    ", - "APNSMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. This value overrides all other content for the message.

    If you specify the raw content of an APNs push notification, the message payload has to include the content-available key. The value of the content-available key has to be an integer, and can only be 0 or 1. If you're sending a standard notification, set the value of content-available to 0. If you're sending a silent (background) notification, set the value of content-available to 1. Additionally, silent notification payloads can't include the alert, badge, or sound keys. For more information, see Generating a Remote Notification and Pushing Background Updates to Your App on the Apple Developer website.

    ", + "APNSMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.

    If you specify the raw content of an APNs push notification, the message payload has to include the content-available key. The value of the content-available key has to be an integer, and can only be 0 or 1. If you're sending a standard notification, set the value of content-available to 0. If you're sending a silent (background) notification, set the value of content-available to 1. Additionally, silent notification payloads can't include the alert, badge, or sound keys. For more information, see Generating a Remote Notification and Pushing Background Updates to Your App on the Apple Developer website.
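A hedged illustration of the rule above, with the Go field name assumed from the Pinpoint APNSMessage shape and the payload keys following the APNs convention described in the text:

    // A standard (alerting) raw payload sets content-available to 0; a silent
    // background payload would instead be {"aps":{"content-available":1}}.
    msg := pinpoint.APNSMessage{
        RawContent: aws.String(`{"aps":{"alert":{"title":"Example","body":"Hello"},"content-available":0}}`),
    }
    _ = msg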

    ", "APNSMessage$Sound" : "

    The key for the sound to play when the recipient receives the push notification. The value for this key is the name of a sound file in your app's main bundle or the Library/Sounds folder in your app's data container. If the sound file can't be found or you specify default for the value, the system plays the default alert sound.

    ", "APNSMessage$ThreadId" : "

    The key that represents your app-specific identifier for grouping notifications. If you provide a Notification Content app extension, you can use this value to group your notifications together.

    ", "APNSMessage$Title" : "

    The title to display above the notification message on the recipient's device.

    ", "APNSMessage$Url" : "

    The URL to open in the recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.

    ", "APNSPushNotificationTemplate$Body" : "

    The message body to use in push notifications that are based on the message template.

    ", "APNSPushNotificationTemplate$MediaUrl" : "

    The URL of an image or video to display in push notifications that are based on the message template.

    ", + "APNSPushNotificationTemplate$RawContent" : "

    The raw, JSON-formatted string to use as the payload for push notifications that are based on the message template. If specified, this value overrides all other content for the message template.

    ", "APNSPushNotificationTemplate$Sound" : "

    The key for the sound to play when the recipient receives a push notification that's based on the message template. The value for this key is the name of a sound file in your app's main bundle or the Library/Sounds folder in your app's data container. If the sound file can't be found or you specify default for the value, the system plays the default alert sound.

    ", "APNSPushNotificationTemplate$Title" : "

    The title to use in push notifications that are based on the message template. This title appears above the notification message on a recipient's device.

    ", "APNSPushNotificationTemplate$Url" : "

    The URL to open in the recipient's default mobile browser, if a recipient taps a push notification that's based on the message template and the value of the Action property is URL.

    ", @@ -1713,11 +1730,12 @@ "ActivityResponse$State" : "

    The current status of the activity. Possible values are: PENDING, INITIALIZING, RUNNING, PAUSED, CANCELLED, and COMPLETED.

    ", "ActivityResponse$TreatmentId" : "

    The unique identifier for the campaign treatment that the activity applies to. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

    ", "AddressConfiguration$BodyOverride" : "

    The message body to use instead of the default message body. This value overrides the default message body.

    ", - "AddressConfiguration$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.

    ", + "AddressConfiguration$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides all other values for the message.

    ", "AddressConfiguration$TitleOverride" : "

    The message title to use instead of the default message title. This value overrides the default message title.

    ", "AndroidPushNotificationTemplate$Body" : "

    The message body to use in a push notification that's based on the message template.

    ", "AndroidPushNotificationTemplate$ImageIconUrl" : "

    The URL of the large icon image to display in the content view of a push notification that's based on the message template.

    ", "AndroidPushNotificationTemplate$ImageUrl" : "

    The URL of an image to display in a push notification that's based on the message template.

    ", + "AndroidPushNotificationTemplate$RawContent" : "

    The raw, JSON-formatted string to use as the payload for a push notification that's based on the message template. If specified, this value overrides all other content for the message template.

    ", "AndroidPushNotificationTemplate$SmallImageIconUrl" : "

    The URL of the small icon image to display in the status bar and the content view of a push notification that's based on the message template.

    ", "AndroidPushNotificationTemplate$Sound" : "

    The sound to play when a recipient receives a push notification that's based on the message template. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

    ", "AndroidPushNotificationTemplate$Title" : "

    The title to use in a push notification that's based on the message template. This title appears above the notification message on a recipient's device.

    ", @@ -1748,7 +1766,7 @@ "BaiduMessage$IconReference" : "

    The icon image name of the asset saved in your app.

    ", "BaiduMessage$ImageIconUrl" : "

    The URL of the large icon image to display in the content view of the push notification.

    ", "BaiduMessage$ImageUrl" : "

    The URL of an image to display in the push notification.

    ", - "BaiduMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.

    ", + "BaiduMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.

    ", "BaiduMessage$SmallImageIconUrl" : "

    The URL of the small icon image to display in the status bar and the content view of the push notification.

    ", "BaiduMessage$Sound" : "

    The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

    ", "BaiduMessage$Title" : "

    The title to display above the notification message on the recipient's device.

    ", @@ -1759,9 +1777,9 @@ "CampaignDateRangeKpiResponse$KpiName" : "

    The name of the metric, also referred to as a key performance indicator (KPI), that the data was retrieved for. This value describes the associated metric and consists of two or more terms, which are comprised of lowercase alphanumeric characters, separated by a hyphen. For a list of possible values, see the Amazon Pinpoint Developer Guide.

    ", "CampaignDateRangeKpiResponse$NextToken" : "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null for the Campaign Metrics resource because the resource returns all results in a single page.

    ", "CampaignDateRangeKpiResponse$StartTime" : "

    The first date and time of the date range that was used to filter the query results, in extended ISO 8601 format. The date range is inclusive.

    ", - "CampaignEmailMessage$Body" : "

    The body of the email for recipients whose email clients don't support HTML content.

    ", + "CampaignEmailMessage$Body" : "

    The body of the email for recipients whose email clients don't render HTML content.

    ", "CampaignEmailMessage$FromAddress" : "

    The verified email address to send the email from. The default address is the FromAddress specified for the email channel for the application.

    ", - "CampaignEmailMessage$HtmlBody" : "

    The body of the email, in HTML format, for recipients whose email clients support HTML content.

    ", + "CampaignEmailMessage$HtmlBody" : "

    The body of the email, in HTML format, for recipients whose email clients render HTML content.

    ", "CampaignEmailMessage$Title" : "

    The subject line, or title, of the email.

    ", "CampaignHook$LambdaFunctionName" : "

    The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Pinpoint invokes to send messages for a campaign.

    ", "CampaignHook$WebUrl" : "

    The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function over HTTPS.

    ", @@ -1789,7 +1807,7 @@ "CreateTemplateMessageBody$Arn" : "

    The Amazon Resource Name (ARN) of the message template that was created.

    ", "CreateTemplateMessageBody$Message" : "

    The message that's returned from the API for the request to create the message template.

    ", "CreateTemplateMessageBody$RequestID" : "

    The unique identifier for the request to create the message template.

    ", - "DefaultMessage$Body" : "

    The default message body of the push notification, email, or SMS message.

    ", + "DefaultMessage$Body" : "

    The default body of the message.

    ", "DefaultPushNotificationMessage$Body" : "

    The default body of the notification message.

    ", "DefaultPushNotificationMessage$Title" : "

    The default title to display above the notification message on a recipient's device.

    ", "DefaultPushNotificationMessage$Url" : "

    The default URL to open in a recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.

    ", @@ -1816,16 +1834,20 @@ "EmailMessage$FromAddress" : "

    The verified email address to send the email message from. The default value is the FromAddress specified for the email channel.

    ", "EmailMessageActivity$NextActivity" : "

    The unique identifier for the next activity to perform, after the message is sent.

    ", "EmailMessageActivity$TemplateName" : "

    The name of the email template to use for the message.

    ", - "EmailTemplateRequest$HtmlPart" : "

    The message body, in HTML format, to use in email messages that are based on the message template. We recommend using HTML format for email clients that support HTML. You can include links, formatted text, and more in an HTML message.

    ", + "EmailTemplateRequest$DefaultSubstitutions" : "

    A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.
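For example (a sketch only; the variable names are invented and the Go field name is assumed from the shape added here), the default-substitutions document is an ordinary JSON object serialized into a string:

    // Default values for two hypothetical template variables; message-specific or
    // address-specific substitutions override these when a message is sent.
    tmpl := pinpoint.EmailTemplateRequest{
        DefaultSubstitutions: aws.String(`{"FirstName":"there","Product":"our service"}`),
    }
    _ = tmpl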

    ", + "EmailTemplateRequest$HtmlPart" : "

    The message body, in HTML format, to use in email messages that are based on the message template. We recommend using HTML format for email clients that render HTML content. You can include links, formatted text, and more in an HTML message.

    ", "EmailTemplateRequest$Subject" : "

    The subject line, or title, to use in email messages that are based on the message template.

    ", - "EmailTemplateRequest$TextPart" : "

    The message body, in text format, to use in email messages that are based on the message template. We recommend using text format for email clients that don't support HTML and clients that are connected to high-latency networks, such as mobile devices.

    ", + "EmailTemplateRequest$TemplateDescription" : "

    A custom description of the message template.

    ", + "EmailTemplateRequest$TextPart" : "

    The message body, in plain text format, to use in email messages that are based on the message template. We recommend using plain text format for email clients that don't render HTML content and clients that are connected to high-latency networks, such as mobile devices.

    ", "EmailTemplateResponse$Arn" : "

    The Amazon Resource Name (ARN) of the message template.

    ", "EmailTemplateResponse$CreationDate" : "

    The date when the message template was created.

    ", + "EmailTemplateResponse$DefaultSubstitutions" : "

    The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

    ", "EmailTemplateResponse$HtmlPart" : "

    The message body, in HTML format, that's used in email messages that are based on the message template.

    ", "EmailTemplateResponse$LastModifiedDate" : "

    The date when the message template was last modified.

    ", "EmailTemplateResponse$Subject" : "

    The subject line, or title, that's used in email messages that are based on the message template.

    ", + "EmailTemplateResponse$TemplateDescription" : "

    The custom description of the message template.

    ", "EmailTemplateResponse$TemplateName" : "

    The name of the message template.

    ", - "EmailTemplateResponse$TextPart" : "

    The message body, in text format, that's used in email messages that are based on the message template.

    ", + "EmailTemplateResponse$TextPart" : "

    The message body, in plain text format, that's used in email messages that are based on the message template.

    ", "EndpointBatchItem$Address" : "

    The destination address for messages or push notifications that you send to the endpoint. The address varies by channel. For a push-notification channel, use the token provided by the push notification service, such as an Apple Push Notification service (APNs) device token or a Firebase Cloud Messaging (FCM) registration token. For the SMS channel, use a phone number in E.164 format, such as +12065550100. For the email channel, use an email address.

    ", "EndpointBatchItem$EffectiveDate" : "

    The date and time, in ISO 8601 format, when the endpoint was created or updated.

    ", "EndpointBatchItem$EndpointStatus" : "

    Specifies whether to send messages or push notifications to the endpoint. Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, messages aren’t sent to the endpoint.

    Amazon Pinpoint automatically sets this value to ACTIVE when you create an endpoint or update an existing endpoint. Amazon Pinpoint automatically sets this value to INACTIVE if you update another endpoint that has the same address specified by the Address property.

    ", @@ -1864,7 +1886,7 @@ "EndpointResponse$OptOut" : "

    Specifies whether the user who's associated with the endpoint has opted out of receiving messages and push notifications from you. Possible values are: ALL, the user has opted out and doesn't want to receive any messages or push notifications; and, NONE, the user hasn't opted out and wants to receive all messages and push notifications.

    ", "EndpointResponse$RequestId" : "

    The unique identifier for the most recent request to update the endpoint.

    ", "EndpointSendConfiguration$BodyOverride" : "

    The body of the message. If specified, this value overrides the default message body.

    ", - "EndpointSendConfiguration$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides the message.

    ", + "EndpointSendConfiguration$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the message. If specified, this value overrides all other values for the message.

    ", "EndpointSendConfiguration$TitleOverride" : "

    The title or subject line of the message. If specified, this value overrides the default message title or subject line.

    ", "EndpointUser$UserId" : "

    The unique identifier for the user.

    ", "Event$AppPackageName" : "

    The package name of the app that's recording the event.

    ", @@ -1908,7 +1930,7 @@ "GCMMessage$ImageIconUrl" : "

    The URL of the large icon image to display in the content view of the push notification.

    ", "GCMMessage$ImageUrl" : "

    The URL of an image to display in the push notification.

    ", "GCMMessage$Priority" : "

  • normal - The notification might be delayed. Delivery is optimized for battery usage on the recipient's device. Use this value unless immediate delivery is required.

  • high - The notification is sent immediately and might wake a sleeping device.

    Amazon Pinpoint specifies this value in the FCM priority parameter when it sends the notification message to FCM.

    The equivalent values for Apple Push Notification service (APNs) are 5, for normal, and 10, for high. If you specify an APNs value for this property, Amazon Pinpoint accepts and converts the value to the corresponding FCM value.

    ", - "GCMMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. This value overrides the message.

    ", + "GCMMessage$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.
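    As a non-authoritative sketch, the RawContent value is any raw JSON string that replaces the rendered message content. The payload keys below (notification, data) follow the common FCM message shape and are assumptions for illustration, not part of this model:

    ```go
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// Assumed FCM-style payload; any valid JSON string can be used as RawContent.
    	payload := map[string]interface{}{
    		"notification": map[string]string{
    			"title": "Order shipped",
    			"body":  "Your package is on the way.",
    		},
    		"data": map[string]string{
    			"orderId": "12345",
    		},
    	}

    	raw, err := json.Marshal(payload)
    	if err != nil {
    		panic(err)
    	}
    	// Pass string(raw) as the raw content for the message.
    	fmt.Println(string(raw))
    }
    ```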

    ", "GCMMessage$RestrictedPackageName" : "

    The package name of the application where registration tokens must match in order for the recipient to receive the message.

    ", "GCMMessage$SmallImageIconUrl" : "

    The URL of the small icon image to display in the status bar and the content view of the push notification.

    ", "GCMMessage$Sound" : "

    The sound to play when the recipient receives the push notification. You can use the default stream or specify the file name of a sound resource that's bundled in your app. On an Android platform, the sound file must reside in /res/raw/.

    ", @@ -1963,7 +1985,7 @@ "Message$ImageUrl" : "

    The URL of an image to display in the push notification.

    ", "Message$JsonBody" : "

    The JSON payload to use for a silent push notification.

    ", "Message$MediaUrl" : "

    The URL of the image or video to display in the push notification.

    ", - "Message$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. This value overrides other values for the message.

    ", + "Message$RawContent" : "

    The raw, JSON-formatted string to use as the payload for the notification message. If specified, this value overrides all other content for the message.

    ", "Message$Title" : "

    The title to display above the notification message on a recipient's device.

    ", "Message$Url" : "

    The URL to open in a recipient's default mobile browser, if a recipient taps the push notification and the value of the Action property is URL.

    ", "MessageBody$Message" : "

    The message that's returned from the API.

    ", @@ -1997,9 +2019,13 @@ "PublicEndpoint$EndpointStatus" : "

    Specifies whether to send messages or push notifications to the endpoint. Valid values are: ACTIVE, messages are sent to the endpoint; and, INACTIVE, messages aren’t sent to the endpoint.

    Amazon Pinpoint automatically sets this value to ACTIVE when you create an endpoint or update an existing endpoint. Amazon Pinpoint automatically sets this value to INACTIVE if you update another endpoint that has the same address specified by the Address property.

    ", "PublicEndpoint$OptOut" : "

    Specifies whether the user who's associated with the endpoint has opted out of receiving messages and push notifications from you. Possible values are: ALL, the user has opted out and doesn't want to receive any messages or push notifications; and, NONE, the user hasn't opted out and wants to receive all messages and push notifications.

    ", "PublicEndpoint$RequestId" : "

    A unique identifier that's generated each time the endpoint is updated.

    ", + "PushNotificationTemplateRequest$DefaultSubstitutions" : "

    A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.

    ", + "PushNotificationTemplateRequest$TemplateDescription" : "

    A custom description of the message template.

    ", "PushNotificationTemplateResponse$Arn" : "

    The Amazon Resource Name (ARN) of the message template.

    ", "PushNotificationTemplateResponse$CreationDate" : "

    The date when the message template was created.

    ", + "PushNotificationTemplateResponse$DefaultSubstitutions" : "

    The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

    ", "PushNotificationTemplateResponse$LastModifiedDate" : "

    The date when the message template was last modified.

    ", + "PushNotificationTemplateResponse$TemplateDescription" : "

    The custom description of the message template.

    ", "PushNotificationTemplateResponse$TemplateName" : "

    The name of the message template.

    ", "QuietTime$End" : "

    The specific time when quiet time ends. This value has to use 24-hour notation and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 to represent 2:30 PM.

    ", "QuietTime$Start" : "

    The specific time when quiet time begins. This value has to use 24-hour notation and be in HH:MM format, where HH is the hour (with a leading zero, if applicable) and MM is the minutes. For example, use 02:30 to represent 2:30 AM, or 14:30 to represent 2:30 PM.
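    A small sketch of producing the required 24-hour HH:MM notation with Go's time package; the example times are arbitrary:

    ```go
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	start := time.Date(2019, time.November, 20, 22, 0, 0, 0, time.UTC)
    	end := time.Date(2019, time.November, 21, 6, 30, 0, 0, time.UTC)

    	// "15:04" is Go's reference layout for 24-hour HH:MM.
    	fmt.Println(start.Format("15:04")) // 22:00
    	fmt.Println(end.Format("15:04"))   // 06:30
    }
    ```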

    ", @@ -2023,10 +2049,14 @@ "SMSMessage$OriginationNumber" : "

    The number to send the SMS message from. This value should be one of the dedicated long or short codes that's assigned to your AWS account. If you don't specify a long or short code, Amazon Pinpoint assigns a random long code to the SMS message and sends the message from that code.
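    For reference, a minimal sketch of checking that a number is in E.164 form before sending; the regular expression is a common approximation and is not taken from the service model:

    ```go
    package main

    import (
    	"fmt"
    	"regexp"
    )

    // e164 matches a leading '+' followed by up to 15 digits, the usual E.164 shape.
    var e164 = regexp.MustCompile(`^\+[1-9]\d{1,14}$`)

    func main() {
    	for _, n := range []string{"+12065550100", "2065550100"} {
    		fmt.Printf("%s valid: %v\n", n, e164.MatchString(n))
    	}
    }
    ```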

    ", "SMSMessage$SenderId" : "

    The sender ID to display as the sender of the message on a recipient's device. Support for sender IDs varies by country or region.

    ", "SMSTemplateRequest$Body" : "

    The message body to use in text messages that are based on the message template.

    ", + "SMSTemplateRequest$DefaultSubstitutions" : "

    A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.

    ", + "SMSTemplateRequest$TemplateDescription" : "

    A custom description of the message template.

    ", "SMSTemplateResponse$Arn" : "

    The Amazon Resource Name (ARN) of the message template.

    ", "SMSTemplateResponse$Body" : "

    The message body that's used in text messages that are based on the message template.

    ", "SMSTemplateResponse$CreationDate" : "

    The date when the message template was created.

    ", + "SMSTemplateResponse$DefaultSubstitutions" : "

    The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

    ", "SMSTemplateResponse$LastModifiedDate" : "

    The date when the message template was last modified.

    ", + "SMSTemplateResponse$TemplateDescription" : "

    The custom description of the message template.

    ", "SMSTemplateResponse$TemplateName" : "

    The name of the message template.

    ", "Schedule$EndTime" : "

    The scheduled time, in ISO 8601 format, when the campaign ended or will end.

    ", "Schedule$StartTime" : "

    The scheduled time, in ISO 8601 format, when the campaign began or will begin.
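    A brief sketch of producing an ISO 8601 timestamp for the schedule fields using Go's RFC 3339 layout (RFC 3339 is an ISO 8601 profile); the chosen date is only an example:

    ```go
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	start := time.Date(2019, time.November, 25, 9, 0, 0, 0, time.UTC)
    	// time.RFC3339 produces an ISO 8601-compatible timestamp.
    	fmt.Println(start.Format(time.RFC3339)) // 2019-11-25T09:00:00Z
    }
    ```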

    ", @@ -2055,7 +2085,9 @@ "Template$Name" : "

    The name of the message template to use for the message. If specified, this value must match the name of an existing message template.

    ", "TemplateResponse$Arn" : "

    The Amazon Resource Name (ARN) of the message template.

    ", "TemplateResponse$CreationDate" : "

    The date when the message template was created.

    ", + "TemplateResponse$DefaultSubstitutions" : "

    The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

    ", "TemplateResponse$LastModifiedDate" : "

    The date when the message template was last modified.

    ", + "TemplateResponse$TemplateDescription" : "

    The custom description of the message template.

    ", "TemplateResponse$TemplateName" : "

    The name of the message template.

    ", "TemplatesResponse$NextToken" : "

    The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

    ", "TreatmentResource$Id" : "

    The unique identifier for the treatment.

    ", @@ -2067,17 +2099,31 @@ "VoiceChannelResponse$LastModifiedBy" : "

    The user who last modified the voice channel.

    ", "VoiceChannelResponse$LastModifiedDate" : "

    The date and time, in ISO 8601 format, when the voice channel was last modified.

    ", "VoiceChannelResponse$Platform" : "

    The type of messaging or notification platform for the channel. For the voice channel, this value is VOICE.

    ", - "VoiceMessage$Body" : "

    The text script for the voice message.

    ", - "VoiceMessage$LanguageCode" : "

    The language to use when delivering the message. For a list of supported languages, see the Amazon Polly Developer Guide.

    ", + "VoiceMessage$Body" : "

    The text of the script to use for the voice message.

    ", + "VoiceMessage$LanguageCode" : "

    The code for the language to use when synthesizing the text of the message script. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.

    ", "VoiceMessage$OriginationNumber" : "

    The long code to send the voice message from. This value should be one of the dedicated long codes that's assigned to your AWS account. Although it isn't required, we recommend that you specify the long code in E.164 format, for example +12065550100, to ensure prompt and accurate delivery of the message.

    ", - "VoiceMessage$VoiceId" : "

    The name of the voice to use when delivering the message. For a list of supported voices, see the Amazon Polly Developer Guide.

    ", + "VoiceMessage$VoiceId" : "

    The name of the voice to use when delivering the message. For a list of supported voices, see the Amazon Polly Developer Guide.

    ", + "VoiceTemplateRequest$Body" : "

    The text of the script to use in messages that are based on the message template, in plain text format.

    ", + "VoiceTemplateRequest$DefaultSubstitutions" : "

    A JSON object that specifies the default values to use for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable. When you create a message that's based on the template, you can override these defaults with message-specific and address-specific variables and values.

    ", + "VoiceTemplateRequest$LanguageCode" : "

    The code for the language to use when synthesizing the text of the script in messages that are based on the message template. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.

    ", + "VoiceTemplateRequest$TemplateDescription" : "

    A custom description of the message template.

    ", + "VoiceTemplateRequest$VoiceId" : "

    The name of the voice to use when delivering messages that are based on the message template. For a list of supported voices, see the Amazon Polly Developer Guide.

    ", + "VoiceTemplateResponse$Arn" : "

    The Amazon Resource Name (ARN) of the message template.

    ", + "VoiceTemplateResponse$Body" : "

    The text of the script that's used in messages that are based on the message template, in plain text format.

    ", + "VoiceTemplateResponse$CreationDate" : "

    The date when the message template was created.

    ", + "VoiceTemplateResponse$DefaultSubstitutions" : "

    The JSON object that specifies the default values that are used for message variables in the message template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the default value for that variable.

    ", + "VoiceTemplateResponse$LanguageCode" : "

    The code for the language that's used when synthesizing the text of the script in messages that are based on the message template. For a list of supported languages and the code for each one, see the Amazon Polly Developer Guide.

    ", + "VoiceTemplateResponse$LastModifiedDate" : "

    The date when the message template was last modified.

    ", + "VoiceTemplateResponse$TemplateDescription" : "

    The custom description of the message template.

    ", + "VoiceTemplateResponse$TemplateName" : "

    The name of the message template.

    ", + "VoiceTemplateResponse$VoiceId" : "

    The name of the voice that's used when delivering messages that are based on the message template. For a list of supported voices, see the Amazon Polly Developer Guide.

    ", "WaitActivity$NextActivity" : "

    The unique identifier for the next activity to perform, after performing the wait activity.

    ", - "WaitTime$WaitFor" : "

    The amount of time, as a duration in ISO 8601 format, to wait before determining whether the activity's conditions have been met or moving participants to the next activity in the journey.

    ", + "WaitTime$WaitFor" : "

    The amount of time to wait, as a duration in ISO 8601 format, before determining whether the activity's conditions have been met or moving participants to the next activity in the journey.
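    Go's time package has no built-in ISO 8601 duration formatter, so a hedged helper sketch that emits simple hour-based durations (for example PT72H) could look like the following; the helper name is hypothetical:

    ```go
    package main

    import (
    	"fmt"
    	"time"
    )

    // isoHours renders a duration as an ISO 8601 time duration in whole hours,
    // which covers values such as PT24H or PT72H.
    func isoHours(d time.Duration) string {
    	return fmt.Sprintf("PT%dH", int(d.Hours()))
    }

    func main() {
    	fmt.Println(isoHours(72 * time.Hour)) // PT72H
    }
    ```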

    ", "WaitTime$WaitUntil" : "

    The date and time, in ISO 8601 format, when Amazon Pinpoint determines whether the activity's conditions have been met or the activity moves participants to the next activity in the journey.

    ", - "WriteCampaignRequest$Description" : "

    The custom description of the campaign.

    ", + "WriteCampaignRequest$Description" : "

    A custom description of the campaign.

    ", "WriteCampaignRequest$Name" : "

    The custom name of the campaign.

    ", "WriteCampaignRequest$SegmentId" : "

    The unique identifier for the segment to associate with the campaign.

    ", - "WriteCampaignRequest$TreatmentDescription" : "

    The custom description of a variation of the campaign to use for A/B testing.

    ", + "WriteCampaignRequest$TreatmentDescription" : "

    A custom description of a variation of the campaign to use for A/B testing.

    ", "WriteCampaignRequest$TreatmentName" : "

    The custom name of a variation of the campaign to use for A/B testing.

    ", "WriteEventStream$DestinationStreamArn" : "

    The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.

    For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name\n

    For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name\n
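    Following the two formats above, a sketch of assembling the ARNs in Go; the region, account ID, and stream names are placeholders:

    ```go
    package main

    import "fmt"

    func main() {
    	region, accountID := "us-east-1", "123456789012"

    	kinesisArn := fmt.Sprintf("arn:aws:kinesis:%s:%s:stream/%s", region, accountID, "pinpoint-events")
    	firehoseArn := fmt.Sprintf("arn:aws:firehose:%s:%s:deliverystream/%s", region, accountID, "pinpoint-events")

    	fmt.Println(kinesisArn)
    	fmt.Println(firehoseArn)
    }
    ```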

    ", "WriteEventStream$RoleArn" : "

    The AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to publish event data to the stream in your AWS account.

    ", @@ -2087,7 +2133,7 @@ "WriteJourneyRequest$RefreshFrequency" : "

    The frequency with which Amazon Pinpoint evaluates segment and event data for the journey, as a duration in ISO 8601 format.

    ", "WriteJourneyRequest$StartActivity" : "

    The unique identifier for the first activity in the journey.

    ", "WriteSegmentRequest$Name" : "

    The name of the segment.

    ", - "WriteTreatmentResource$TreatmentDescription" : "

    The custom description of the treatment.

    ", + "WriteTreatmentResource$TreatmentDescription" : "

    A custom description of the treatment.

    ", "WriteTreatmentResource$TreatmentName" : "

    The custom name of the treatment. A treatment is a variation of a campaign that's used for A/B testing of a campaign.

    ", "ListOf__string$member" : null, "MapOf__string$member" : null diff --git a/models/apis/quicksight/2018-04-01/api-2.json b/models/apis/quicksight/2018-04-01/api-2.json index 054b5e433cf..925b05d0c69 100644 --- a/models/apis/quicksight/2018-04-01/api-2.json +++ b/models/apis/quicksight/2018-04-01/api-2.json @@ -11,6 +11,80 @@ "uid":"quicksight-2018-04-01" }, "operations":{ + "CancelIngestion":{ + "name":"CancelIngestion", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" + }, + "input":{"shape":"CancelIngestionRequest"}, + "output":{"shape":"CancelIngestionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDashboard":{ + "name":"CreateDashboard", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"CreateDashboardRequest"}, + "output":{"shape":"CreateDashboardResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDataSet":{ + "name":"CreateDataSet", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets" + }, + "input":{"shape":"CreateDataSetRequest"}, + "output":{"shape":"CreateDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateDataSource":{ + "name":"CreateDataSource", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sources" + }, + "input":{"shape":"CreateDataSourceRequest"}, + "output":{"shape":"CreateDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, "CreateGroup":{ "name":"CreateGroup", "http":{ @@ -49,6 +123,128 @@ {"shape":"ResourceUnavailableException"} ] }, + "CreateIAMPolicyAssignment":{ + "name":"CreateIAMPolicyAssignment", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/" + }, + "input":{"shape":"CreateIAMPolicyAssignmentRequest"}, + "output":{"shape":"CreateIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateIngestion":{ + "name":"CreateIngestion", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" + }, + 
"input":{"shape":"CreateIngestionRequest"}, + "output":{"shape":"CreateIngestionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateTemplate":{ + "name":"CreateTemplate", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"CreateTemplateRequest"}, + "output":{"shape":"CreateTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "CreateTemplateAlias":{ + "name":"CreateTemplateAlias", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"CreateTemplateAliasRequest"}, + "output":{"shape":"CreateTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDashboard":{ + "name":"DeleteDashboard", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"DeleteDashboardRequest"}, + "output":{"shape":"DeleteDashboardResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDataSet":{ + "name":"DeleteDataSet", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" + }, + "input":{"shape":"DeleteDataSetRequest"}, + "output":{"shape":"DeleteDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteDataSource":{ + "name":"DeleteDataSource", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" + }, + "input":{"shape":"DeleteDataSourceRequest"}, + "output":{"shape":"DeleteDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteGroup":{ "name":"DeleteGroup", "http":{ @@ -85,6 +281,57 @@ {"shape":"ResourceUnavailableException"} ] }, + "DeleteIAMPolicyAssignment":{ + "name":"DeleteIAMPolicyAssignment", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/namespace/{Namespace}/iam-policy-assignments/{AssignmentName}" + }, + "input":{"shape":"DeleteIAMPolicyAssignmentRequest"}, + "output":{"shape":"DeleteIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + 
{"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteTemplate":{ + "name":"DeleteTemplate", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"DeleteTemplateRequest"}, + "output":{"shape":"DeleteTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DeleteTemplateAlias":{ + "name":"DeleteTemplateAlias", + "http":{ + "method":"DELETE", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"DeleteTemplateAliasRequest"}, + "output":{"shape":"DeleteTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, "DeleteUser":{ "name":"DeleteUser", "http":{ @@ -119,184 +366,214 @@ {"shape":"ResourceUnavailableException"} ] }, - "DescribeGroup":{ - "name":"DescribeGroup", + "DescribeDashboard":{ + "name":"DescribeDashboard", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" }, - "input":{"shape":"DescribeGroupRequest"}, - "output":{"shape":"DescribeGroupResponse"}, + "input":{"shape":"DescribeDashboardRequest"}, + "output":{"shape":"DescribeDashboardResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} ] }, - "DescribeUser":{ - "name":"DescribeUser", + "DescribeDashboardPermissions":{ + "name":"DescribeDashboardPermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions" }, - "input":{"shape":"DescribeUserRequest"}, - "output":{"shape":"DescribeUserResponse"}, + "input":{"shape":"DescribeDashboardPermissionsRequest"}, + "output":{"shape":"DescribeDashboardPermissionsResponse"}, "errors":[ - {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} ] }, - "GetDashboardEmbedUrl":{ - "name":"GetDashboardEmbedUrl", + "DescribeDataSet":{ + "name":"DescribeDataSet", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url" + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" }, - "input":{"shape":"GetDashboardEmbedUrlRequest"}, - "output":{"shape":"GetDashboardEmbedUrlResponse"}, + "input":{"shape":"DescribeDataSetRequest"}, + "output":{"shape":"DescribeDataSetResponse"}, 
"errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceExistsException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"DomainNotWhitelistedException"}, - {"shape":"QuickSightUserNotFoundException"}, - {"shape":"IdentityTypeNotSupportedException"}, - {"shape":"SessionLifetimeInMinutesInvalidException"}, - {"shape":"UnsupportedUserEditionException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListGroupMemberships":{ - "name":"ListGroupMemberships", + "DescribeDataSetPermissions":{ + "name":"DescribeDataSetPermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members" + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions" }, - "input":{"shape":"ListGroupMembershipsRequest"}, - "output":{"shape":"ListGroupMembershipsResponse"}, + "input":{"shape":"DescribeDataSetPermissionsRequest"}, + "output":{"shape":"DescribeDataSetPermissionsResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListGroups":{ - "name":"ListGroups", + "DescribeDataSource":{ + "name":"DescribeDataSource", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups" + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" }, - "input":{"shape":"ListGroupsRequest"}, - "output":{"shape":"ListGroupsResponse"}, + "input":{"shape":"DescribeDataSourceRequest"}, + "output":{"shape":"DescribeDataSourceResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListUserGroups":{ - "name":"ListUserGroups", + "DescribeDataSourcePermissions":{ + "name":"DescribeDataSourcePermissions", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups" + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions" }, - "input":{"shape":"ListUserGroupsRequest"}, - "output":{"shape":"ListUserGroupsResponse"}, + "input":{"shape":"DescribeDataSourcePermissionsRequest"}, + "output":{"shape":"DescribeDataSourcePermissionsResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} ] }, - "ListUsers":{ - "name":"ListUsers", + "DescribeGroup":{ + "name":"DescribeGroup", "http":{ "method":"GET", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + 
"requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" }, - "input":{"shape":"ListUsersRequest"}, - "output":{"shape":"ListUsersResponse"}, + "input":{"shape":"DescribeGroupRequest"}, + "output":{"shape":"DescribeGroupResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ] }, - "RegisterUser":{ - "name":"RegisterUser", + "DescribeIAMPolicyAssignment":{ + "name":"DescribeIAMPolicyAssignment", "http":{ - "method":"POST", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}" }, - "input":{"shape":"RegisterUserRequest"}, - "output":{"shape":"RegisterUserResponse"}, + "input":{"shape":"DescribeIAMPolicyAssignmentRequest"}, + "output":{"shape":"DescribeIAMPolicyAssignmentResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"LimitExceededException"}, - {"shape":"ResourceExistsException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} ] }, - "UpdateGroup":{ - "name":"UpdateGroup", + "DescribeIngestion":{ + "name":"DescribeIngestion", "http":{ - "method":"PUT", - "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}" }, - "input":{"shape":"UpdateGroupRequest"}, - "output":{"shape":"UpdateGroupResponse"}, + "input":{"shape":"DescribeIngestionRequest"}, + "output":{"shape":"DescribeIngestionResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"PreconditionNotMetException"}, - {"shape":"InternalFailureException"}, - {"shape":"ResourceUnavailableException"} + {"shape":"ResourceExistsException"}, + {"shape":"InternalFailureException"} ] }, - "UpdateUser":{ - "name":"UpdateUser", + "DescribeTemplate":{ + "name":"DescribeTemplate", "http":{ - "method":"PUT", + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"DescribeTemplateRequest"}, + "output":{"shape":"DescribeTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DescribeTemplateAlias":{ + "name":"DescribeTemplateAlias", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"DescribeTemplateAliasRequest"}, + "output":{"shape":"DescribeTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + 
{"shape":"InternalFailureException"} + ] + }, + "DescribeTemplatePermissions":{ + "name":"DescribeTemplatePermissions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/permissions" + }, + "input":{"shape":"DescribeTemplatePermissionsRequest"}, + "output":{"shape":"DescribeTemplatePermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "DescribeUser":{ + "name":"DescribeUser", + "http":{ + "method":"GET", "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" }, - "input":{"shape":"UpdateUserRequest"}, - "output":{"shape":"UpdateUserResponse"}, + "input":{"shape":"DescribeUserRequest"}, + "output":{"shape":"DescribeUserResponse"}, "errors":[ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValueException"}, @@ -305,133 +582,4097 @@ {"shape":"InternalFailureException"}, {"shape":"ResourceUnavailableException"} ] - } - }, - "shapes":{ - "AccessDeniedException":{ + }, + "GetDashboardEmbedUrl":{ + "name":"GetDashboardEmbedUrl", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url" + }, + "input":{"shape":"GetDashboardEmbedUrlRequest"}, + "output":{"shape":"GetDashboardEmbedUrlResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"DomainNotWhitelistedException"}, + {"shape":"QuickSightUserNotFoundException"}, + {"shape":"IdentityTypeNotSupportedException"}, + {"shape":"SessionLifetimeInMinutesInvalidException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDashboardVersions":{ + "name":"ListDashboardVersions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions" + }, + "input":{"shape":"ListDashboardVersionsRequest"}, + "output":{"shape":"ListDashboardVersionsResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/dashboards" + }, + "input":{"shape":"ListDashboardsRequest"}, + "output":{"shape":"ListDashboardsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDataSets":{ + "name":"ListDataSets", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets" + }, + "input":{"shape":"ListDataSetsRequest"}, + "output":{"shape":"ListDataSetsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListDataSources":{ + "name":"ListDataSources", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sources" + }, + "input":{"shape":"ListDataSourcesRequest"}, + 
"output":{"shape":"ListDataSourcesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListGroupMemberships":{ + "name":"ListGroupMemberships", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members" + }, + "input":{"shape":"ListGroupMembershipsRequest"}, + "output":{"shape":"ListGroupMembershipsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListGroups":{ + "name":"ListGroups", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups" + }, + "input":{"shape":"ListGroupsRequest"}, + "output":{"shape":"ListGroupsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListIAMPolicyAssignments":{ + "name":"ListIAMPolicyAssignments", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments" + }, + "input":{"shape":"ListIAMPolicyAssignmentsRequest"}, + "output":{"shape":"ListIAMPolicyAssignmentsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListIAMPolicyAssignmentsForUser":{ + "name":"ListIAMPolicyAssignmentsForUser", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments" + }, + "input":{"shape":"ListIAMPolicyAssignmentsForUserRequest"}, + "output":{"shape":"ListIAMPolicyAssignmentsForUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListIngestions":{ + "name":"ListIngestions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions" + }, + "input":{"shape":"ListIngestionsRequest"}, + "output":{"shape":"ListIngestionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceExistsException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + 
{"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplateAliases":{ + "name":"ListTemplateAliases", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases" + }, + "input":{"shape":"ListTemplateAliasesRequest"}, + "output":{"shape":"ListTemplateAliasesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplateVersions":{ + "name":"ListTemplateVersions", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/versions" + }, + "input":{"shape":"ListTemplateVersionsRequest"}, + "output":{"shape":"ListTemplateVersionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListTemplates":{ + "name":"ListTemplates", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/templates" + }, + "input":{"shape":"ListTemplatesRequest"}, + "output":{"shape":"ListTemplatesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "ListUserGroups":{ + "name":"ListUserGroups", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups" + }, + "input":{"shape":"ListUserGroupsRequest"}, + "output":{"shape":"ListUserGroupsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "RegisterUser":{ + "name":"RegisterUser", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users" + }, + "input":{"shape":"RegisterUserRequest"}, + "output":{"shape":"RegisterUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceExistsException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + 
"errors":[ + {"shape":"LimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/resources/{ResourceArn}/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboard":{ + "name":"UpdateDashboard", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}" + }, + "input":{"shape":"UpdateDashboardRequest"}, + "output":{"shape":"UpdateDashboardResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboardPermissions":{ + "name":"UpdateDashboardPermissions", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions" + }, + "input":{"shape":"UpdateDashboardPermissionsRequest"}, + "output":{"shape":"UpdateDashboardPermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"ConflictException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDashboardPublishedVersion":{ + "name":"UpdateDashboardPublishedVersion", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions/{VersionNumber}" + }, + "input":{"shape":"UpdateDashboardPublishedVersionRequest"}, + "output":{"shape":"UpdateDashboardPublishedVersionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSet":{ + "name":"UpdateDataSet", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}" + }, + "input":{"shape":"UpdateDataSetRequest"}, + "output":{"shape":"UpdateDataSetResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSetPermissions":{ + "name":"UpdateDataSetPermissions", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions" + }, + "input":{"shape":"UpdateDataSetPermissionsRequest"}, + "output":{"shape":"UpdateDataSetPermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSource":{ + 
"name":"UpdateDataSource", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}" + }, + "input":{"shape":"UpdateDataSourceRequest"}, + "output":{"shape":"UpdateDataSourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateDataSourcePermissions":{ + "name":"UpdateDataSourcePermissions", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions" + }, + "input":{"shape":"UpdateDataSourcePermissionsRequest"}, + "output":{"shape":"UpdateDataSourcePermissionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateGroup":{ + "name":"UpdateGroup", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}" + }, + "input":{"shape":"UpdateGroupRequest"}, + "output":{"shape":"UpdateGroupResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + }, + "UpdateIAMPolicyAssignment":{ + "name":"UpdateIAMPolicyAssignment", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}" + }, + "input":{"shape":"UpdateIAMPolicyAssignmentRequest"}, + "output":{"shape":"UpdateIAMPolicyAssignmentResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConcurrentUpdatingException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplate":{ + "name":"UpdateTemplate", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}" + }, + "input":{"shape":"UpdateTemplateRequest"}, + "output":{"shape":"UpdateTemplateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"LimitExceededException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplateAlias":{ + "name":"UpdateTemplateAlias", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}" + }, + "input":{"shape":"UpdateTemplateAliasRequest"}, + "output":{"shape":"UpdateTemplateAliasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateTemplatePermissions":{ + "name":"UpdateTemplatePermissions", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/templates/{TemplateId}/permissions" + }, + "input":{"shape":"UpdateTemplatePermissionsRequest"}, + 
"output":{"shape":"UpdateTemplatePermissionsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"UnsupportedUserEditionException"}, + {"shape":"InternalFailureException"} + ] + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}" + }, + "input":{"shape":"UpdateUserRequest"}, + "output":{"shape":"UpdateUserResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ] + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":401}, + "exception":true + }, + "ActionList":{ + "type":"list", + "member":{"shape":"String"}, + "max":16, + "min":1 + }, + "ActiveIAMPolicyAssignment":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "PolicyArn":{"shape":"Arn"} + } + }, + "ActiveIAMPolicyAssignmentList":{ + "type":"list", + "member":{"shape":"ActiveIAMPolicyAssignment"} + }, + "AdHocFilteringOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{"shape":"DashboardBehavior"} + } + }, + "AliasName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w\\-]+|(\\$LATEST)|(\\$PUBLISHED)" + }, + "AmazonElasticsearchParameters":{ + "type":"structure", + "required":["Domain"], + "members":{ + "Domain":{"shape":"Domain"} + } + }, + "Arn":{"type":"string"}, + "AssignmentStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DRAFT", + "DISABLED" + ] + }, + "AthenaParameters":{ + "type":"structure", + "members":{ + "WorkGroup":{"shape":"WorkGroup"} + } + }, + "AuroraParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "AuroraPostgreSqlParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "AwsAccountId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"^[0-9]{12}$" + }, + "AwsIotAnalyticsParameters":{ + "type":"structure", + "required":["DataSetName"], + "members":{ + "DataSetName":{"shape":"DataSetName"} + } + }, + "Boolean":{"type":"boolean"}, + "CalculatedColumn":{ + "type":"structure", + "required":[ + "ColumnName", + "ColumnId", + "Expression" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "ColumnId":{"shape":"ColumnId"}, + "Expression":{"shape":"Expression"} + } + }, + "CalculatedColumnList":{ + "type":"list", + "member":{"shape":"CalculatedColumn"}, + "max":128, + "min":1 + }, + "CancelIngestionRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "IngestionId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + } + } + }, + "CancelIngestionResponse":{ + "type":"structure", + 
"members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CastColumnTypeOperation":{ + "type":"structure", + "required":[ + "ColumnName", + "NewColumnType" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "NewColumnType":{"shape":"ColumnDataType"}, + "Format":{"shape":"TypeCastFormat"} + } + }, + "Catalog":{ + "type":"string", + "max":128 + }, + "ClusterId":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DECIMAL", + "DATETIME" + ] + }, + "ColumnGroup":{ + "type":"structure", + "members":{ + "GeoSpatialColumnGroup":{"shape":"GeoSpatialColumnGroup"} + } + }, + "ColumnGroupColumnSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"} + } + }, + "ColumnGroupColumnSchemaList":{ + "type":"list", + "member":{"shape":"ColumnGroupColumnSchema"}, + "max":500 + }, + "ColumnGroupList":{ + "type":"list", + "member":{"shape":"ColumnGroup"}, + "max":8, + "min":1 + }, + "ColumnGroupName":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnGroupSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "ColumnGroupColumnSchemaList":{"shape":"ColumnGroupColumnSchemaList"} + } + }, + "ColumnGroupSchemaList":{ + "type":"list", + "member":{"shape":"ColumnGroupSchema"}, + "max":500 + }, + "ColumnId":{ + "type":"string", + "max":64, + "min":1 + }, + "ColumnList":{ + "type":"list", + "member":{"shape":"ColumnName"}, + "max":16, + "min":1 + }, + "ColumnName":{ + "type":"string", + "max":128, + "min":1 + }, + "ColumnSchema":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "DataType":{"shape":"String"}, + "GeographicRole":{"shape":"String"} + } + }, + "ColumnSchemaList":{ + "type":"list", + "member":{"shape":"ColumnSchema"}, + "max":500 + }, + "ColumnTag":{ + "type":"structure", + "members":{ + "ColumnGeographicRole":{"shape":"GeoSpatialDataRole"} + } + }, + "ColumnTagList":{ + "type":"list", + "member":{"shape":"ColumnTag"}, + "max":16, + "min":1 + }, + "ConcurrentUpdatingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateColumnsOperation":{ + "type":"structure", + "required":["Columns"], + "members":{ + "Columns":{"shape":"CalculatedColumnList"} + } + }, + "CreateDashboardRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "Name", + "SourceEntity" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "Name":{"shape":"DashboardName"}, + "Parameters":{"shape":"Parameters"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "SourceEntity":{"shape":"DashboardSourceEntity"}, + "Tags":{"shape":"TagList"}, + "VersionDescription":{"shape":"VersionDescription"}, + "DashboardPublishOptions":{"shape":"DashboardPublishOptions"} + } + }, + "CreateDashboardResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + 
"CreationStatus":{"shape":"ResourceStatus"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CreateDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "Name", + "PhysicalTableMap", + "ImportMode" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "IngestionArn":{"shape":"Arn"}, + "IngestionId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId", + "Name", + "Type" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "Type":{"shape":"DataSourceType"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "Credentials":{"shape":"DataSourceCredentials"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDataSourceResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateGroupMembershipRequest":{ + "type":"structure", + "required":[ + "MemberName", + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "MemberName":{ + "shape":"GroupMemberName", + "location":"uri", + "locationName":"MemberName" + }, + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateGroupMembershipResponse":{ + "type":"structure", + "members":{ + "GroupMember":{"shape":"GroupMember"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"GroupDescription"}, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateGroupResponse":{ + "type":"structure", + "members":{ + "Group":{"shape":"Group"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateIAMPolicyAssignmentRequest":{ + 
"type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "AssignmentStatus", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "CreateIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentId":{"shape":"String"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateIngestionRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "IngestionId", + "AwsAccountId" + ], + "members":{ + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + } + } + }, + "CreateIngestionResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "IngestionStatus":{"shape":"IngestionStatus"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "CreateTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName", + "TemplateVersionNumber" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + }, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } + }, + "CreateTemplateAliasResponse":{ + "type":"structure", + "members":{ + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CreateTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "SourceEntity" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "Name":{"shape":"TemplateName"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "SourceEntity":{"shape":"TemplateSourceEntity"}, + "Tags":{"shape":"TagList"}, + "VersionDescription":{"shape":"VersionDescription"} + } + }, + "CreateTemplateResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "CredentialPair":{ + "type":"structure", + "required":[ + "Username", + "Password" + ], + "members":{ + "Username":{"shape":"Username"}, + "Password":{"shape":"Password"} + } + }, + "CustomSql":{ + "type":"structure", + "required":[ + "DataSourceArn", + "Name", 
+ "SqlQuery" + ], + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "Name":{"shape":"CustomSqlName"}, + "SqlQuery":{"shape":"SqlQuery"}, + "Columns":{"shape":"InputColumnList"} + } + }, + "CustomSqlName":{ + "type":"string", + "max":64, + "min":1 + }, + "Dashboard":{ + "type":"structure", + "members":{ + "DashboardId":{"shape":"RestrictiveResourceId"}, + "Arn":{"shape":"Arn"}, + "Name":{"shape":"DashboardName"}, + "Version":{"shape":"DashboardVersion"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastPublishedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"} + } + }, + "DashboardBehavior":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "DashboardError":{ + "type":"structure", + "members":{ + "Type":{"shape":"DashboardErrorType"}, + "Message":{"shape":"NonEmptyString"} + } + }, + "DashboardErrorList":{ + "type":"list", + "member":{"shape":"DashboardError"}, + "min":1 + }, + "DashboardErrorType":{ + "type":"string", + "enum":[ + "DATA_SET_NOT_FOUND", + "INTERNAL_FAILURE", + "PARAMETER_VALUE_INCOMPATIBLE", + "PARAMETER_TYPE_INVALID", + "PARAMETER_NOT_FOUND", + "COLUMN_TYPE_MISMATCH", + "COLUMN_GEOGRAPHIC_ROLE_MISMATCH", + "COLUMN_REPLACEMENT_MISSING" + ] + }, + "DashboardName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "DashboardPublishOptions":{ + "type":"structure", + "members":{ + "AdHocFilteringOption":{"shape":"AdHocFilteringOption"}, + "ExportToCSVOption":{"shape":"ExportToCSVOption"}, + "SheetControlsOption":{"shape":"SheetControlsOption"} + } + }, + "DashboardSourceEntity":{ + "type":"structure", + "members":{ + "SourceTemplate":{"shape":"DashboardSourceTemplate"} + } + }, + "DashboardSourceTemplate":{ + "type":"structure", + "required":[ + "DataSetReferences", + "Arn" + ], + "members":{ + "DataSetReferences":{"shape":"DataSetReferenceList"}, + "Arn":{"shape":"Arn"} + } + }, + "DashboardSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "Name":{"shape":"DashboardName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "PublishedVersionNumber":{"shape":"VersionNumber"}, + "LastPublishedTime":{"shape":"Timestamp"} + } + }, + "DashboardSummaryList":{ + "type":"list", + "member":{"shape":"DashboardSummary"}, + "max":100 + }, + "DashboardUIState":{ + "type":"string", + "enum":[ + "EXPANDED", + "COLLAPSED" + ] + }, + "DashboardVersion":{ + "type":"structure", + "members":{ + "CreatedTime":{"shape":"Timestamp"}, + "Errors":{"shape":"DashboardErrorList"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "Arn":{"shape":"Arn"}, + "SourceEntityArn":{"shape":"Arn"}, + "Description":{"shape":"VersionDescription"} + } + }, + "DashboardVersionSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "CreatedTime":{"shape":"Timestamp"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "SourceEntityArn":{"shape":"Arn"}, + "Description":{"shape":"VersionDescription"} + } + }, + "DashboardVersionSummaryList":{ + "type":"list", + "member":{"shape":"DashboardVersionSummary"}, + "max":100 + }, + "DataSet":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + 
"OutputColumns":{"shape":"OutputColumnList"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ConsumedSpiceCapacityInBytes":{"shape":"Long"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "DataSetConfiguration":{ + "type":"structure", + "members":{ + "Placeholder":{"shape":"String"}, + "DataSetSchema":{"shape":"DataSetSchema"}, + "ColumnGroupSchemaList":{"shape":"ColumnGroupSchemaList"} + } + }, + "DataSetConfigurationList":{ + "type":"list", + "member":{"shape":"DataSetConfiguration"}, + "max":30 + }, + "DataSetImportMode":{ + "type":"string", + "enum":[ + "SPICE", + "DIRECT_QUERY" + ] + }, + "DataSetName":{ + "type":"string", + "max":128, + "min":1 + }, + "DataSetReference":{ + "type":"structure", + "required":[ + "DataSetPlaceholder", + "DataSetArn" + ], + "members":{ + "DataSetPlaceholder":{"shape":"NonEmptyString"}, + "DataSetArn":{"shape":"Arn"} + } + }, + "DataSetReferenceList":{ + "type":"list", + "member":{"shape":"DataSetReference"}, + "min":1 + }, + "DataSetSchema":{ + "type":"structure", + "members":{ + "ColumnSchemaList":{"shape":"ColumnSchemaList"} + } + }, + "DataSetSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "DataSetSummaryList":{ + "type":"list", + "member":{"shape":"DataSetSummary"} + }, + "DataSource":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "Name":{"shape":"ResourceName"}, + "Type":{"shape":"DataSourceType"}, + "Status":{"shape":"ResourceStatus"}, + "CreatedTime":{"shape":"Timestamp"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"}, + "ErrorInfo":{"shape":"DataSourceErrorInfo"} + } + }, + "DataSourceCredentials":{ + "type":"structure", + "members":{ + "CredentialPair":{"shape":"CredentialPair"} + }, + "sensitive":true + }, + "DataSourceErrorInfo":{ + "type":"structure", + "members":{ + "Type":{"shape":"DataSourceErrorInfoType"}, + "Message":{"shape":"String"} + } + }, + "DataSourceErrorInfoType":{ + "type":"string", + "enum":[ + "TIMEOUT", + "ENGINE_VERSION_NOT_SUPPORTED", + "UNKNOWN_HOST", + "GENERIC_SQL_FAILURE", + "CONFLICT", + "UNKNOWN" + ] + }, + "DataSourceList":{ + "type":"list", + "member":{"shape":"DataSource"} + }, + "DataSourceParameters":{ + "type":"structure", + "members":{ + "AmazonElasticsearchParameters":{"shape":"AmazonElasticsearchParameters"}, + "AthenaParameters":{"shape":"AthenaParameters"}, + "AuroraParameters":{"shape":"AuroraParameters"}, + "AuroraPostgreSqlParameters":{"shape":"AuroraPostgreSqlParameters"}, + "AwsIotAnalyticsParameters":{"shape":"AwsIotAnalyticsParameters"}, + "JiraParameters":{"shape":"JiraParameters"}, + "MariaDbParameters":{"shape":"MariaDbParameters"}, + "MySqlParameters":{"shape":"MySqlParameters"}, + "PostgreSqlParameters":{"shape":"PostgreSqlParameters"}, + "PrestoParameters":{"shape":"PrestoParameters"}, + "RdsParameters":{"shape":"RdsParameters"}, + "RedshiftParameters":{"shape":"RedshiftParameters"}, + "S3Parameters":{"shape":"S3Parameters"}, + "ServiceNowParameters":{"shape":"ServiceNowParameters"}, + 
"SnowflakeParameters":{"shape":"SnowflakeParameters"}, + "SparkParameters":{"shape":"SparkParameters"}, + "SqlServerParameters":{"shape":"SqlServerParameters"}, + "TeradataParameters":{"shape":"TeradataParameters"}, + "TwitterParameters":{"shape":"TwitterParameters"} + } + }, + "DataSourceType":{ + "type":"string", + "enum":[ + "ADOBE_ANALYTICS", + "AMAZON_ELASTICSEARCH", + "ATHENA", + "AURORA", + "AURORA_POSTGRESQL", + "AWS_IOT_ANALYTICS", + "GITHUB", + "JIRA", + "MARIADB", + "MYSQL", + "POSTGRESQL", + "PRESTO", + "REDSHIFT", + "S3", + "SALESFORCE", + "SERVICENOW", + "SNOWFLAKE", + "SPARK", + "SQLSERVER", + "TERADATA", + "TWITTER" + ] + }, + "Database":{ + "type":"string", + "max":128, + "min":1 + }, + "DateTimeParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"TimestampList"} + } + }, + "DateTimeParameterList":{ + "type":"list", + "member":{"shape":"DateTimeParameter"}, + "max":100 + }, + "DecimalParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"DoubleList"} + } + }, + "DecimalParameterList":{ + "type":"list", + "member":{"shape":"DecimalParameter"}, + "max":100 + }, + "DeleteDashboardRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + } + } + }, + "DeleteDashboardResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "Arn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "RequestId":{"shape":"String"} + } + }, + "DeleteDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DeleteDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DeleteDataSourceResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteGroupMembershipRequest":{ + "type":"structure", + "required":[ + "MemberName", + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "MemberName":{ + "shape":"GroupMemberName", + "location":"uri", + "locationName":"MemberName" + }, + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + 
"locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteGroupMembershipResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteGroupResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteIAMPolicyAssignmentRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DeleteTemplateAliasResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "AliasName":{"shape":"AliasName"}, + "Arn":{"shape":"Arn"}, + "RequestId":{"shape":"String"} + } + }, + "DeleteTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + } + } + }, + "DeleteTemplateResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Arn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteUserByPrincipalIdRequest":{ + "type":"structure", + "required":[ + "PrincipalId", + "AwsAccountId", + "Namespace" + ], + "members":{ + "PrincipalId":{ + "shape":"String", + "location":"uri", + "locationName":"PrincipalId" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteUserByPrincipalIdResponse":{ + 
"type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DeleteUserRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DeleteUserResponse":{ + "type":"structure", + "members":{ + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "Delimiter":{ + "type":"string", + "max":1, + "min":1 + }, + "DescribeDashboardPermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + } + } + }, + "DescribeDashboardPermissionsResponse":{ + "type":"structure", + "members":{ + "DashboardId":{"shape":"RestrictiveResourceId"}, + "DashboardArn":{"shape":"Arn"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeDashboardRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + }, + "AliasName":{ + "shape":"AliasName", + "location":"querystring", + "locationName":"alias-name" + } + } + }, + "DescribeDashboardResponse":{ + "type":"structure", + "members":{ + "Dashboard":{"shape":"Dashboard"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeDataSetPermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetPermissionsResponse":{ + "type":"structure", + "members":{ + "DataSetArn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSetRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + } + } + }, + "DescribeDataSetResponse":{ + "type":"structure", + "members":{ + "DataSet":{"shape":"DataSet"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSourcePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + 
"AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DescribeDataSourcePermissionsResponse":{ + "type":"structure", + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeDataSourceRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSourceId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSourceId" + } + } + }, + "DescribeDataSourceResponse":{ + "type":"structure", + "members":{ + "DataSource":{"shape":"DataSource"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeGroupRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeGroupResponse":{ + "type":"structure", + "members":{ + "Group":{"shape":"Group"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeIAMPolicyAssignmentRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "AssignmentName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeIAMPolicyAssignmentResponse":{ + "type":"structure", + "members":{ + "IAMPolicyAssignment":{"shape":"IAMPolicyAssignment"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeIngestionRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId", + "IngestionId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "IngestionId":{ + "shape":"IngestionId", + "location":"uri", + "locationName":"IngestionId" + } + } + }, + "DescribeIngestionResponse":{ + "type":"structure", + "members":{ + "Ingestion":{"shape":"Ingestion"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeTemplateAliasRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + } + } + }, + "DescribeTemplateAliasResponse":{ 
+ "type":"structure", + "members":{ + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "DescribeTemplatePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + } + } + }, + "DescribeTemplatePermissionsResponse":{ + "type":"structure", + "members":{ + "TemplateId":{"shape":"RestrictiveResourceId"}, + "TemplateArn":{"shape":"Arn"}, + "Permissions":{"shape":"ResourcePermissionList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeTemplateRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"querystring", + "locationName":"version-number" + }, + "AliasName":{ + "shape":"AliasName", + "location":"querystring", + "locationName":"alias-name" + } + } + }, + "DescribeTemplateResponse":{ + "type":"structure", + "members":{ + "Template":{"shape":"Template"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "DescribeUserRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "Domain":{ + "type":"string", + "max":64, + "min":1 + }, + "DomainNotWhitelistedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true + }, + "Double":{"type":"double"}, + "DoubleList":{ + "type":"list", + "member":{"shape":"Double"} + }, + "EmbeddingUrl":{ + "type":"string", + "sensitive":true + }, + "ErrorInfo":{ + "type":"structure", + "members":{ + "Type":{"shape":"IngestionErrorType"}, + "Message":{"shape":"string"} + } + }, + "ExceptionResourceType":{ + "type":"string", + "enum":[ + "USER", + "GROUP", + "NAMESPACE", + "ACCOUNT_SETTINGS", + "IAMPOLICY_ASSIGNMENT", + "DATA_SOURCE", + "DATA_SET", + "VPC_CONNECTION", + "INGESTION" + ] + }, + "ExportToCSVOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{"shape":"DashboardBehavior"} + } + }, + "Expression":{ + "type":"string", + "max":4096, + "min":1 + }, + "FileFormat":{ + "type":"string", + "enum":[ + "CSV", + "TSV", + "CLF", + "ELF", + "XLSX", + "JSON" + ] + }, + "FilterOperation":{ + "type":"structure", + "required":["ConditionExpression"], + "members":{ + "ConditionExpression":{"shape":"Expression"} + } + }, + "GeoSpatialColumnGroup":{ + "type":"structure", + "required":[ + "Name", + "CountryCode", + "Columns" + ], + "members":{ 
+ "Name":{"shape":"ColumnGroupName"}, + "CountryCode":{"shape":"GeoSpatialCountryCode"}, + "Columns":{"shape":"ColumnList"} + } + }, + "GeoSpatialCountryCode":{ + "type":"string", + "enum":["US"] + }, + "GeoSpatialDataRole":{ + "type":"string", + "enum":[ + "COUNTRY", + "STATE", + "COUNTY", + "CITY", + "POSTCODE", + "LONGITUDE", + "LATITUDE" + ] + }, + "GetDashboardEmbedUrlRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId", + "IdentityType" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "IdentityType":{ + "shape":"IdentityType", + "location":"querystring", + "locationName":"creds-type" + }, + "SessionLifetimeInMinutes":{ + "shape":"SessionLifetimeInMinutes", + "location":"querystring", + "locationName":"session-lifetime" + }, + "UndoRedoDisabled":{ + "shape":"boolean", + "location":"querystring", + "locationName":"undo-redo-disabled" + }, + "ResetDisabled":{ + "shape":"boolean", + "location":"querystring", + "locationName":"reset-disabled" + }, + "UserArn":{ + "shape":"Arn", + "location":"querystring", + "locationName":"user-arn" + } + } + }, + "GetDashboardEmbedUrlResponse":{ + "type":"structure", + "members":{ + "EmbedUrl":{"shape":"EmbeddingUrl"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "Group":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "GroupName":{"shape":"GroupName"}, + "Description":{"shape":"GroupDescription"}, + "PrincipalId":{"shape":"String"} + } + }, + "GroupDescription":{ + "type":"string", + "max":512, + "min":1 + }, + "GroupList":{ + "type":"list", + "member":{"shape":"Group"} + }, + "GroupMember":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "MemberName":{"shape":"GroupMemberName"} + } + }, + "GroupMemberList":{ + "type":"list", + "member":{"shape":"GroupMember"} + }, + "GroupMemberName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "GroupName":{ + "type":"string", + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "Host":{ + "type":"string", + "max":256, + "min":1 + }, + "IAMPolicyAssignment":{ + "type":"structure", + "members":{ + "AwsAccountId":{"shape":"AwsAccountId"}, + "AssignmentId":{"shape":"String"}, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "AssignmentStatus":{"shape":"AssignmentStatus"} + } + }, + "IAMPolicyAssignmentName":{ + "type":"string", + "min":1, + "pattern":"(?=^.{2,256}$)(?!.*\\s)[0-9a-zA-Z-_.:=+@]*$" + }, + "IAMPolicyAssignmentSummary":{ + "type":"structure", + "members":{ + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentStatus":{"shape":"AssignmentStatus"} + } + }, + "IAMPolicyAssignmentSummaryList":{ + "type":"list", + "member":{"shape":"IAMPolicyAssignmentSummary"} + }, + "IdentityMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"IdentityNameList"} + }, + "IdentityName":{ + "type":"string", + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "IdentityNameList":{ + "type":"list", + "member":{"shape":"IdentityName"} + }, + "IdentityType":{ + "type":"string", + "enum":[ + "IAM", + "QUICKSIGHT" + ] + }, + "IdentityTypeNotSupportedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + 
"error":{"httpStatusCode":403}, + "exception":true + }, + "Ingestion":{ + "type":"structure", + "required":[ + "Arn", + "IngestionStatus", + "CreatedTime" + ], + "members":{ + "Arn":{"shape":"Arn"}, + "IngestionId":{"shape":"IngestionId"}, + "IngestionStatus":{"shape":"IngestionStatus"}, + "ErrorInfo":{"shape":"ErrorInfo"}, + "RowInfo":{"shape":"RowInfo"}, + "QueueInfo":{"shape":"QueueInfo"}, + "CreatedTime":{"shape":"timestamp"}, + "IngestionTimeInSeconds":{ + "shape":"long", + "box":true + }, + "IngestionSizeInBytes":{ + "shape":"long", + "box":true + }, + "RequestSource":{"shape":"IngestionRequestSource"}, + "RequestType":{"shape":"IngestionRequestType"} + } + }, + "IngestionErrorType":{ + "type":"string", + "enum":[ + "FAILURE_TO_ASSUME_ROLE", + "INGESTION_SUPERSEDED", + "INGESTION_CANCELED", + "DATA_SET_DELETED", + "DATA_SET_NOT_SPICE", + "S3_UPLOADED_FILE_DELETED", + "S3_MANIFEST_ERROR", + "DATA_TOLERANCE_EXCEPTION", + "SPICE_TABLE_NOT_FOUND", + "DATA_SET_SIZE_LIMIT_EXCEEDED", + "ROW_SIZE_LIMIT_EXCEEDED", + "ACCOUNT_CAPACITY_LIMIT_EXCEEDED", + "CUSTOMER_ERROR", + "DATA_SOURCE_NOT_FOUND", + "IAM_ROLE_NOT_AVAILABLE", + "CONNECTION_FAILURE", + "SQL_TABLE_NOT_FOUND", + "PERMISSION_DENIED", + "SSL_CERTIFICATE_VALIDATION_FAILURE", + "OAUTH_TOKEN_FAILURE", + "SOURCE_API_LIMIT_EXCEEDED_FAILURE", + "PASSWORD_AUTHENTICATION_FAILURE", + "SQL_SCHEMA_MISMATCH_ERROR", + "INVALID_DATE_FORMAT", + "INVALID_DATAPREP_SYNTAX", + "SOURCE_RESOURCE_LIMIT_EXCEEDED", + "SQL_INVALID_PARAMETER_VALUE", + "QUERY_TIMEOUT", + "SQL_NUMERIC_OVERFLOW", + "UNRESOLVABLE_HOST", + "UNROUTABLE_HOST", + "SQL_EXCEPTION", + "S3_FILE_INACCESSIBLE", + "IOT_FILE_NOT_FOUND", + "IOT_DATA_SET_FILE_EMPTY", + "INVALID_DATA_SOURCE_CONFIG", + "DATA_SOURCE_AUTH_FAILED", + "DATA_SOURCE_CONNECTION_FAILED", + "FAILURE_TO_PROCESS_JSON_FILE", + "INTERNAL_SERVICE_ERROR" + ] + }, + "IngestionId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "IngestionMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "IngestionRequestSource":{ + "type":"string", + "enum":[ + "MANUAL", + "SCHEDULED" + ] + }, + "IngestionRequestType":{ + "type":"string", + "enum":[ + "INITIAL_INGESTION", + "EDIT", + "INCREMENTAL_REFRESH", + "FULL_REFRESH" + ] + }, + "IngestionStatus":{ + "type":"string", + "enum":[ + "INITIALIZED", + "QUEUED", + "RUNNING", + "FAILED", + "COMPLETED", + "CANCELLED" + ] + }, + "Ingestions":{ + "type":"list", + "member":{"shape":"Ingestion"} + }, + "InputColumn":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{"shape":"ColumnName"}, + "Type":{"shape":"InputColumnDataType"} + } + }, + "InputColumnDataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DECIMAL", + "DATETIME", + "BIT", + "BOOLEAN", + "JSON" + ] + }, + "InputColumnList":{ + "type":"list", + "member":{"shape":"InputColumn"}, + "max":2048, + "min":1 + }, + "InstanceId":{ + "type":"string", + "max":64, + "min":1 + }, + "IntegerParameter":{ + "type":"structure", + "required":[ + "Name", + "Values" + ], + "members":{ + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"LongList"} + } + }, + "IntegerParameterList":{ + "type":"list", + "member":{"shape":"IntegerParameter"}, + "max":100 + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + 
"Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidParameterValueException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "JiraParameters":{ + "type":"structure", + "required":["SiteBaseUrl"], + "members":{ + "SiteBaseUrl":{"shape":"SiteBaseUrl"} + } + }, + "JoinInstruction":{ + "type":"structure", + "required":[ + "LeftOperand", + "RightOperand", + "Type", + "OnClause" + ], + "members":{ + "LeftOperand":{"shape":"LogicalTableId"}, + "RightOperand":{"shape":"LogicalTableId"}, + "Type":{"shape":"JoinType"}, + "OnClause":{"shape":"OnClause"} + } + }, + "JoinType":{ + "type":"string", + "enum":[ + "INNER", + "OUTER", + "LEFT", + "RIGHT" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ListDashboardVersionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DashboardId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DashboardId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"DashboardId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDashboardVersionsResponse":{ + "type":"structure", + "members":{ + "DashboardVersionSummaryList":{"shape":"DashboardVersionSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListDashboardsRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDashboardsResponse":{ + "type":"structure", + "members":{ + "DashboardSummaryList":{"shape":"DashboardSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListDataSetsRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDataSetsResponse":{ + "type":"structure", + "members":{ + "DataSetSummaries":{"shape":"DataSetSummaryList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListDataSourcesRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + 
"shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListDataSourcesResponse":{ + "type":"structure", + "members":{ + "DataSources":{"shape":"DataSourceList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListGroupMembershipsRequest":{ + "type":"structure", + "required":[ + "GroupName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "GroupName":{ + "shape":"GroupName", + "location":"uri", + "locationName":"GroupName" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListGroupMembershipsResponse":{ + "type":"structure", + "members":{ + "GroupMemberList":{"shape":"GroupMemberList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListGroupsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListGroupsResponse":{ + "type":"structure", + "members":{ + "GroupList":{"shape":"GroupList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIAMPolicyAssignmentsForUserRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "UserName", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListIAMPolicyAssignmentsForUserResponse":{ + "type":"structure", + "members":{ + "ActiveAssignments":{"shape":"ActiveIAMPolicyAssignmentList"}, + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIAMPolicyAssignmentsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + 
"locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIAMPolicyAssignmentsResponse":{ + "type":"structure", + "members":{ + "IAMPolicyAssignments":{"shape":"IAMPolicyAssignmentSummaryList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListIngestionsRequest":{ + "type":"structure", + "required":[ + "DataSetId", + "AwsAccountId" + ], + "members":{ + "DataSetId":{ + "shape":"string", + "location":"uri", + "locationName":"DataSetId" + }, + "NextToken":{ + "shape":"string", + "location":"querystring", + "locationName":"next-token" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "MaxResults":{ + "shape":"IngestionMaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListIngestionsResponse":{ + "type":"structure", + "members":{ + "Ingestions":{"shape":"Ingestions"}, + "NextToken":{"shape":"string"}, + "RequestId":{"shape":"string"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"Arn", + "location":"uri", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListTemplateAliasesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-result" + } + } + }, + "ListTemplateAliasesResponse":{ + "type":"structure", + "members":{ + "TemplateAliasList":{"shape":"TemplateAliasList"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"}, + "NextToken":{"shape":"String"} + } + }, + "ListTemplateVersionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListTemplateVersionsResponse":{ + "type":"structure", + "members":{ + "TemplateVersionSummaryList":{"shape":"TemplateVersionSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListTemplatesRequest":{ + "type":"structure", + "required":["AwsAccountId"], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + 
"location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-result" + } + } + }, + "ListTemplatesResponse":{ + "type":"structure", + "members":{ + "TemplateSummaryList":{"shape":"TemplateSummaryList"}, + "NextToken":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, + "RequestId":{"shape":"String"} + } + }, + "ListUserGroupsRequest":{ + "type":"structure", + "required":[ + "UserName", + "AwsAccountId", + "Namespace" + ], + "members":{ + "UserName":{ + "shape":"UserName", + "location":"uri", + "locationName":"UserName" + }, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListUserGroupsResponse":{ + "type":"structure", + "members":{ + "GroupList":{"shape":"GroupList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "NextToken":{ + "shape":"String", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "box":true, + "location":"querystring", + "locationName":"max-results" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "UserList":{"shape":"UserList"}, + "NextToken":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "LogicalTable":{ + "type":"structure", + "required":[ + "Alias", + "Source" + ], + "members":{ + "Alias":{"shape":"LogicalTableAlias"}, + "DataTransforms":{"shape":"TransformOperationList"}, + "Source":{"shape":"LogicalTableSource"} + } + }, + "LogicalTableAlias":{ + "type":"string", + "max":64, + "min":1 + }, + "LogicalTableId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z-]*" + }, + "LogicalTableMap":{ + "type":"map", + "key":{"shape":"LogicalTableId"}, + "value":{"shape":"LogicalTable"}, + "max":32, + "min":1 + }, + "LogicalTableSource":{ + "type":"structure", + "members":{ + "JoinInstruction":{"shape":"JoinInstruction"}, + "PhysicalTableId":{"shape":"PhysicalTableId"} + } + }, + "Long":{"type":"long"}, + "LongList":{ + "type":"list", + "member":{"shape":"Long"} + }, + "ManifestFileLocation":{ + "type":"structure", + "required":[ + "Bucket", + "Key" + ], + "members":{ + "Bucket":{"shape":"S3Bucket"}, + "Key":{"shape":"S3Key"} + } + }, + "MariaDbParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "MySqlParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + 
"Database":{"shape":"Database"} + } + }, + "Namespace":{ + "type":"string", + "max":64, + "pattern":"^[a-zA-Z0-9._-]*$" + }, + "NonEmptyString":{ + "type":"string", + "pattern":".*\\S.*" + }, + "OnClause":{ + "type":"string", + "max":512, + "min":1 + }, + "OptionalPort":{ + "type":"integer", + "max":65535, + "min":0 + }, + "OutputColumn":{ + "type":"structure", + "members":{ + "Name":{"shape":"ColumnName"}, + "Type":{"shape":"ColumnDataType"} + } + }, + "OutputColumnList":{ + "type":"list", + "member":{"shape":"OutputColumn"} + }, + "Parameters":{ + "type":"structure", + "members":{ + "StringParameters":{"shape":"StringParameterList"}, + "IntegerParameters":{"shape":"IntegerParameterList"}, + "DecimalParameters":{"shape":"DecimalParameterList"}, + "DateTimeParameters":{"shape":"DateTimeParameterList"} + } + }, + "Password":{ + "type":"string", + "max":1024, + "min":1 + }, + "PhysicalTable":{ + "type":"structure", + "members":{ + "RelationalTable":{"shape":"RelationalTable"}, + "CustomSql":{"shape":"CustomSql"}, + "S3Source":{"shape":"S3Source"} + } + }, + "PhysicalTableId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[0-9a-zA-Z-]*" + }, + "PhysicalTableMap":{ + "type":"map", + "key":{"shape":"PhysicalTableId"}, + "value":{"shape":"PhysicalTable"}, + "max":16, + "min":1 + }, + "Port":{ + "type":"integer", + "max":65535, + "min":1 + }, + "PositiveInteger":{ + "type":"integer", + "min":1 + }, + "PostgreSqlParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Database" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "PreconditionNotMetException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "PrestoParameters":{ + "type":"structure", + "required":[ + "Host", + "Port", + "Catalog" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Catalog":{"shape":"Catalog"} + } + }, + "Principal":{ + "type":"string", + "max":256, + "min":1 + }, + "ProjectOperation":{ + "type":"structure", + "required":["ProjectedColumns"], + "members":{ + "ProjectedColumns":{"shape":"ProjectedColumnList"} + } + }, + "ProjectedColumnList":{ + "type":"list", + "member":{"shape":"String"}, + "max":2000, + "min":1 + }, + "Query":{ + "type":"string", + "max":256, + "min":1 + }, + "QueueInfo":{ + "type":"structure", + "required":[ + "WaitingOnIngestion", + "QueuedIngestion" + ], + "members":{ + "WaitingOnIngestion":{"shape":"string"}, + "QueuedIngestion":{"shape":"string"} + } + }, + "QuickSightUserNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "RdsParameters":{ + "type":"structure", + "required":[ + "InstanceId", + "Database" + ], + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "Database":{"shape":"Database"} + } + }, + "RedshiftParameters":{ + "type":"structure", + "required":["Database"], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"OptionalPort"}, + "Database":{"shape":"Database"}, + "ClusterId":{"shape":"ClusterId"} + } + }, + "RegisterUserRequest":{ + "type":"structure", + "required":[ + "IdentityType", + "Email", + "UserRole", + "AwsAccountId", + "Namespace" + ], + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "Email":{"shape":"String"}, + "UserRole":{"shape":"UserRole"}, + "IamArn":{"shape":"String"}, + 
"SessionName":{"shape":"RoleSessionName"}, + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "location":"uri", + "locationName":"Namespace" + }, + "UserName":{"shape":"UserName"} + } + }, + "RegisterUserResponse":{ + "type":"structure", + "members":{ + "User":{"shape":"User"}, + "UserInvitationUrl":{"shape":"String"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "RelationalTable":{ + "type":"structure", + "required":[ + "DataSourceArn", + "Name", + "InputColumns" + ], + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "Schema":{"shape":"RelationalTableSchema"}, + "Name":{"shape":"RelationalTableName"}, + "InputColumns":{"shape":"InputColumnList"} + } + }, + "RelationalTableName":{ + "type":"string", + "max":64, + "min":1 + }, + "RelationalTableSchema":{ + "type":"string", + "max":64 + }, + "RenameColumnOperation":{ + "type":"structure", + "required":[ + "ColumnName", + "NewColumnName" + ], + "members":{ + "ColumnName":{"shape":"ColumnName"}, + "NewColumnName":{"shape":"ColumnName"} + } + }, + "ResourceExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":409}, + "exception":true + }, + "ResourceId":{"type":"string"}, + "ResourceName":{ + "type":"string", + "max":128, + "min":1 + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "ResourcePermission":{ + "type":"structure", + "required":[ + "Principal", + "Actions" + ], + "members":{ + "Principal":{"shape":"Principal"}, + "Actions":{"shape":"ActionList"} + } + }, + "ResourcePermissionList":{ + "type":"list", + "member":{"shape":"ResourcePermission"}, + "max":64, + "min":1 + }, + "ResourceStatus":{ + "type":"string", + "enum":[ + "CREATION_IN_PROGRESS", + "CREATION_SUCCESSFUL", + "CREATION_FAILED", + "UPDATE_IN_PROGRESS", + "UPDATE_SUCCESSFUL", + "UPDATE_FAILED" + ] + }, + "ResourceUnavailableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{"shape":"ExceptionResourceType"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":503}, + "exception":true + }, + "RestrictiveResourceId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\w\\-]+" + }, + "RoleSessionName":{ + "type":"string", + "max":64, + "min":2, + "pattern":"[\\w+=.@-]*" + }, + "RowInfo":{ + "type":"structure", + "members":{ + "RowsIngested":{ + "shape":"long", + "box":true + }, + "RowsDropped":{ + "shape":"long", + "box":true + } + } + }, + "RowLevelPermissionDataSet":{ + "type":"structure", + "required":[ + "Arn", + "PermissionPolicy" + ], + "members":{ + "Arn":{"shape":"Arn"}, + "PermissionPolicy":{"shape":"RowLevelPermissionPolicy"} + } + }, + "RowLevelPermissionPolicy":{ + "type":"string", + "enum":[ + "GRANT_ACCESS", + "DENY_ACCESS" + ] + }, + "S3Bucket":{ + "type":"string", + "max":1024, + "min":1 + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":1 + }, + "S3Parameters":{ + "type":"structure", + "required":["ManifestFileLocation"], + "members":{ + "ManifestFileLocation":{"shape":"ManifestFileLocation"} + } + }, + "S3Source":{ + "type":"structure", + "required":[ + "DataSourceArn", + 
"InputColumns" + ], + "members":{ + "DataSourceArn":{"shape":"Arn"}, + "UploadSettings":{"shape":"UploadSettings"}, + "InputColumns":{"shape":"InputColumnList"} + } + }, + "ServiceNowParameters":{ + "type":"structure", + "required":["SiteBaseUrl"], + "members":{ + "SiteBaseUrl":{"shape":"SiteBaseUrl"} + } + }, + "SessionLifetimeInMinutes":{ + "type":"long", + "max":600, + "min":15 + }, + "SessionLifetimeInMinutesInvalidException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SheetControlsOption":{ + "type":"structure", + "members":{ + "VisibilityState":{"shape":"DashboardUIState"} + } + }, + "SiteBaseUrl":{ + "type":"string", + "max":1024, + "min":1 + }, + "SnowflakeParameters":{ "type":"structure", + "required":[ + "Host", + "Database", + "Warehouse" + ], "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":401}, - "exception":true + "Host":{"shape":"Host"}, + "Database":{"shape":"Database"}, + "Warehouse":{"shape":"Warehouse"} + } }, - "Arn":{"type":"string"}, - "AwsAccountId":{ + "SparkParameters":{ + "type":"structure", + "required":[ + "Host", + "Port" + ], + "members":{ + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"} + } + }, + "SqlQuery":{ "type":"string", - "max":12, - "min":12, - "pattern":"^[0-9]{12}$" + "max":65536, + "min":1 }, - "Boolean":{"type":"boolean"}, - "CreateGroupMembershipRequest":{ + "SqlServerParameters":{ "type":"structure", "required":[ - "MemberName", - "GroupName", - "AwsAccountId", - "Namespace" + "Host", + "Port", + "Database" ], "members":{ - "MemberName":{ - "shape":"GroupMemberName", - "location":"uri", - "locationName":"MemberName" - }, - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} } }, - "CreateGroupMembershipResponse":{ + "SslProperties":{ "type":"structure", "members":{ - "GroupMember":{"shape":"GroupMember"}, - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + "DisableSsl":{"shape":"Boolean"} } }, - "CreateGroupRequest":{ + "StatusCode":{"type":"integer"}, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, + "StringParameter":{ "type":"structure", "required":[ - "GroupName", - "AwsAccountId", - "Namespace" + "Name", + "Values" ], "members":{ - "GroupName":{"shape":"GroupName"}, - "Description":{"shape":"GroupDescription"}, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Name":{"shape":"NonEmptyString"}, + "Values":{"shape":"StringList"} } }, - "CreateGroupResponse":{ + "StringParameterList":{ + "type":"list", + "member":{"shape":"StringParameter"}, + "max":100 + }, + "Tag":{ "type":"structure", + "required":[ + "Key", + "Value" + ], "members":{ - "Group":{"shape":"Group"}, - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} } }, - "DeleteGroupMembershipRequest":{ + "TagColumnOperation":{ 
"type":"structure", "required":[ - "MemberName", - "GroupName", - "AwsAccountId", - "Namespace" + "ColumnName", + "Tags" ], "members":{ - "MemberName":{ - "shape":"GroupMemberName", - "location":"uri", - "locationName":"MemberName" - }, - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", + "ColumnName":{"shape":"ColumnName"}, + "Tags":{"shape":"ColumnTagList"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"Arn", "location":"uri", - "locationName":"AwsAccountId" + "locationName":"ResourceArn" }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Tags":{"shape":"TagList"} } }, - "DeleteGroupMembershipResponse":{ + "TagResourceResponse":{ "type":"structure", "members":{ "RequestId":{"shape":"String"}, @@ -441,102 +4682,223 @@ } } }, - "DeleteGroupRequest":{ + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Template":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "Name":{"shape":"TemplateName"}, + "Version":{"shape":"TemplateVersion"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "LastUpdatedTime":{"shape":"Timestamp"}, + "CreatedTime":{"shape":"Timestamp"} + } + }, + "TemplateAlias":{ + "type":"structure", + "members":{ + "AliasName":{"shape":"AliasName"}, + "Arn":{"shape":"Arn"}, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } + }, + "TemplateAliasList":{ + "type":"list", + "member":{"shape":"TemplateAlias"}, + "max":100 + }, + "TemplateError":{ + "type":"structure", + "members":{ + "Type":{"shape":"TemplateErrorType"}, + "Message":{"shape":"NonEmptyString"} + } + }, + "TemplateErrorList":{ + "type":"list", + "member":{"shape":"TemplateError"}, + "min":1 + }, + "TemplateErrorType":{ + "type":"string", + "enum":[ + "DATA_SET_NOT_FOUND", + "INTERNAL_FAILURE" + ] + }, + "TemplateName":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[\\u0020-\\u00FF]+" + }, + "TemplateSourceAnalysis":{ "type":"structure", "required":[ - "GroupName", - "AwsAccountId", - "Namespace" + "Arn", + "DataSetReferences" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Arn":{"shape":"Arn"}, + "DataSetReferences":{"shape":"DataSetReferenceList"} } }, - "DeleteGroupResponse":{ + "TemplateSourceEntity":{ "type":"structure", "members":{ - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } + "SourceAnalysis":{"shape":"TemplateSourceAnalysis"}, + "SourceTemplate":{"shape":"TemplateSourceTemplate"} } }, - "DeleteUserByPrincipalIdRequest":{ + "TemplateSourceTemplate":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{"shape":"Arn"} + } + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Name":{"shape":"TemplateName"}, + "LatestVersionNumber":{"shape":"VersionNumber"}, + "CreatedTime":{"shape":"Timestamp"}, + 
"LastUpdatedTime":{"shape":"Timestamp"} + } + }, + "TemplateSummaryList":{ + "type":"list", + "member":{"shape":"TemplateSummary"}, + "max":100 + }, + "TemplateVersion":{ + "type":"structure", + "members":{ + "CreatedTime":{"shape":"Timestamp"}, + "Errors":{"shape":"TemplateErrorList"}, + "VersionNumber":{"shape":"VersionNumber"}, + "Status":{"shape":"ResourceStatus"}, + "DataSetConfigurations":{"shape":"DataSetConfigurationList"}, + "Description":{"shape":"VersionDescription"}, + "SourceEntityArn":{"shape":"Arn"} + } + }, + "TemplateVersionSummary":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "VersionNumber":{"shape":"VersionNumber"}, + "CreatedTime":{"shape":"Timestamp"}, + "Status":{"shape":"ResourceStatus"}, + "Description":{"shape":"VersionDescription"} + } + }, + "TemplateVersionSummaryList":{ + "type":"list", + "member":{"shape":"TemplateVersionSummary"}, + "max":100 + }, + "TeradataParameters":{ "type":"structure", "required":[ - "PrincipalId", - "AwsAccountId", - "Namespace" + "Host", + "Port", + "Database" ], "members":{ - "PrincipalId":{ - "shape":"String", - "location":"uri", - "locationName":"PrincipalId" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", - "location":"uri", - "locationName":"AwsAccountId" - }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" - } + "Host":{"shape":"Host"}, + "Port":{"shape":"Port"}, + "Database":{"shape":"Database"} + } + }, + "TextQualifier":{ + "type":"string", + "enum":[ + "DOUBLE_QUOTE", + "SINGLE_QUOTE" + ] + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":429}, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TimestampList":{ + "type":"list", + "member":{"shape":"Timestamp"} + }, + "TransformOperation":{ + "type":"structure", + "members":{ + "ProjectOperation":{"shape":"ProjectOperation"}, + "FilterOperation":{"shape":"FilterOperation"}, + "CreateColumnsOperation":{"shape":"CreateColumnsOperation"}, + "RenameColumnOperation":{"shape":"RenameColumnOperation"}, + "CastColumnTypeOperation":{"shape":"CastColumnTypeOperation"}, + "TagColumnOperation":{"shape":"TagColumnOperation"} + } + }, + "TransformOperationList":{ + "type":"list", + "member":{"shape":"TransformOperation"}, + "max":2048, + "min":1 + }, + "TwitterParameters":{ + "type":"structure", + "required":[ + "Query", + "MaxRows" + ], + "members":{ + "Query":{"shape":"Query"}, + "MaxRows":{"shape":"PositiveInteger"} } }, - "DeleteUserByPrincipalIdResponse":{ + "TypeCastFormat":{ + "type":"string", + "max":32 + }, + "UnsupportedUserEditionException":{ "type":"structure", "members":{ - "RequestId":{"shape":"String"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - } - } + "Message":{"shape":"String"}, + "RequestId":{"shape":"String"} + }, + "error":{"httpStatusCode":403}, + "exception":true }, - "DeleteUserRequest":{ + "UntagResourceRequest":{ "type":"structure", "required":[ - "UserName", - "AwsAccountId", - "Namespace" + "ResourceArn", + "TagKeys" ], "members":{ - "UserName":{ - "shape":"UserName", - "location":"uri", - "locationName":"UserName" - }, - "AwsAccountId":{ - "shape":"AwsAccountId", + "ResourceArn":{ + "shape":"Arn", "location":"uri", - "locationName":"AwsAccountId" + "locationName":"ResourceArn" }, - "Namespace":{ - "shape":"Namespace", - "location":"uri", - "locationName":"Namespace" + "TagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + 
"locationName":"keys" } } }, - "DeleteUserResponse":{ + "UntagResourceResponse":{ "type":"structure", "members":{ "RequestId":{"shape":"String"}, @@ -546,35 +4908,33 @@ } } }, - "DescribeGroupRequest":{ + "UpdateDashboardPermissionsRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "DashboardId" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DashboardId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DashboardId" + }, + "GrantPermissions":{"shape":"UpdateResourcePermissionList"}, + "RevokePermissions":{"shape":"UpdateResourcePermissionList"} } }, - "DescribeGroupResponse":{ + "UpdateDashboardPermissionsResponse":{ "type":"structure", "members":{ - "Group":{"shape":"Group"}, + "DashboardArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "Permissions":{"shape":"ResourcePermissionList"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -582,73 +4942,50 @@ } } }, - "DescribeUserRequest":{ + "UpdateDashboardPublishedVersionRequest":{ "type":"structure", "required":[ - "UserName", "AwsAccountId", - "Namespace" + "DashboardId", + "VersionNumber" ], "members":{ - "UserName":{ - "shape":"UserName", - "location":"uri", - "locationName":"UserName" - }, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DashboardId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" + "locationName":"DashboardId" + }, + "VersionNumber":{ + "shape":"VersionNumber", + "location":"uri", + "locationName":"VersionNumber" } } }, - "DescribeUserResponse":{ + "UpdateDashboardPublishedVersionResponse":{ "type":"structure", "members":{ - "User":{"shape":"User"}, - "RequestId":{"shape":"String"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "DashboardArn":{"shape":"Arn"}, "Status":{ "shape":"StatusCode", "location":"statusCode" - } - } - }, - "DomainNotWhitelistedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, + }, "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - "EmbeddingUrl":{ - "type":"string", - "sensitive":true - }, - "ExceptionResourceType":{ - "type":"string", - "enum":[ - "USER", - "GROUP", - "NAMESPACE", - "DATA_SOURCE", - "DATA_SET", - "VPC_CONNECTION", - "INGESTION" - ] + } }, - "GetDashboardEmbedUrlRequest":{ + "UpdateDashboardRequest":{ "type":"structure", "required":[ "AwsAccountId", "DashboardId", - "IdentityType" + "Name", + "SourceEntity" ], "members":{ "AwsAccountId":{ @@ -657,183 +4994,129 @@ "locationName":"AwsAccountId" }, "DashboardId":{ - "shape":"String", + "shape":"RestrictiveResourceId", "location":"uri", "locationName":"DashboardId" }, - "IdentityType":{ - "shape":"IdentityType", - "location":"querystring", - "locationName":"creds-type" - }, - "SessionLifetimeInMinutes":{ - "shape":"SessionLifetimeInMinutes", - "location":"querystring", - "locationName":"session-lifetime" - }, - "UndoRedoDisabled":{ - "shape":"boolean", - "location":"querystring", - "locationName":"undo-redo-disabled" - }, - "ResetDisabled":{ - "shape":"boolean", - "location":"querystring", - "locationName":"reset-disabled" - }, - "UserArn":{ - "shape":"Arn", - "location":"querystring", - 
"locationName":"user-arn" - } + "Name":{"shape":"DashboardName"}, + "SourceEntity":{"shape":"DashboardSourceEntity"}, + "Parameters":{"shape":"Parameters"}, + "VersionDescription":{"shape":"VersionDescription"}, + "DashboardPublishOptions":{"shape":"DashboardPublishOptions"} } }, - "GetDashboardEmbedUrlResponse":{ + "UpdateDashboardResponse":{ "type":"structure", "members":{ - "EmbedUrl":{"shape":"EmbeddingUrl"}, - "Status":{ - "shape":"StatusCode", - "location":"statusCode" - }, + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "DashboardId":{"shape":"RestrictiveResourceId"}, + "CreationStatus":{"shape":"ResourceStatus"}, + "Status":{"shape":"StatusCode"}, "RequestId":{"shape":"String"} } }, - "Group":{ + "UpdateDataSetPermissionsRequest":{ "type":"structure", + "required":[ + "AwsAccountId", + "DataSetId" + ], "members":{ - "Arn":{"shape":"Arn"}, - "GroupName":{"shape":"GroupName"}, - "Description":{"shape":"GroupDescription"}, - "PrincipalId":{"shape":"String"} + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" + }, + "GrantPermissions":{"shape":"ResourcePermissionList"}, + "RevokePermissions":{"shape":"ResourcePermissionList"} } }, - "GroupDescription":{ - "type":"string", - "max":512, - "min":1 - }, - "GroupList":{ - "type":"list", - "member":{"shape":"Group"} - }, - "GroupMember":{ + "UpdateDataSetPermissionsResponse":{ "type":"structure", "members":{ - "Arn":{"shape":"Arn"}, - "MemberName":{"shape":"GroupMemberName"} + "DataSetArn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } } }, - "GroupMemberList":{ - "type":"list", - "member":{"shape":"GroupMember"} - }, - "GroupMemberName":{ - "type":"string", - "max":256, - "min":1, - "pattern":"[\\u0020-\\u00FF]+" - }, - "GroupName":{ - "type":"string", - "min":1, - "pattern":"[\\u0020-\\u00FF]+" - }, - "IdentityType":{ - "type":"string", - "enum":[ - "IAM", - "QUICKSIGHT" - ] - }, - "IdentityTypeNotSupportedException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - "InternalFailureException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "InvalidNextTokenException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "LimitExceededException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":409}, - "exception":true - }, - "ListGroupMembershipsRequest":{ + "UpdateDataSetRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "DataSetId", + "Name", + "PhysicalTableMap", + "ImportMode" ], "members":{ - "GroupName":{ - "shape":"GroupName", + "AwsAccountId":{ + "shape":"AwsAccountId", "location":"uri", - "locationName":"GroupName" - }, - "NextToken":{ - 
"shape":"String", - "location":"querystring", - "locationName":"next-token" + "locationName":"AwsAccountId" }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" + "DataSetId":{ + "shape":"ResourceId", + "location":"uri", + "locationName":"DataSetId" }, + "Name":{"shape":"ResourceName"}, + "PhysicalTableMap":{"shape":"PhysicalTableMap"}, + "LogicalTableMap":{"shape":"LogicalTableMap"}, + "ImportMode":{"shape":"DataSetImportMode"}, + "ColumnGroups":{"shape":"ColumnGroupList"}, + "RowLevelPermissionDataSet":{"shape":"RowLevelPermissionDataSet"} + } + }, + "UpdateDataSetResponse":{ + "type":"structure", + "members":{ + "Arn":{"shape":"Arn"}, + "DataSetId":{"shape":"ResourceId"}, + "IngestionArn":{"shape":"Arn"}, + "IngestionId":{"shape":"ResourceId"}, + "RequestId":{"shape":"String"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + } + } + }, + "UpdateDataSourcePermissionsRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "DataSourceId" + ], + "members":{ "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "DataSourceId":{ + "shape":"ResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DataSourceId" + }, + "GrantPermissions":{"shape":"ResourcePermissionList"}, + "RevokePermissions":{"shape":"ResourcePermissionList"} } }, - "ListGroupMembershipsResponse":{ + "UpdateDataSourcePermissionsResponse":{ "type":"structure", "members":{ - "GroupMemberList":{"shape":"GroupMemberList"}, - "NextToken":{"shape":"String"}, + "DataSourceArn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -841,11 +5124,12 @@ } } }, - "ListGroupsRequest":{ + "UpdateDataSourceRequest":{ "type":"structure", "required":[ "AwsAccountId", - "Namespace" + "DataSourceId", + "Name" ], "members":{ "AwsAccountId":{ @@ -853,29 +5137,24 @@ "location":"uri", "locationName":"AwsAccountId" }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" - }, - "Namespace":{ - "shape":"Namespace", + "DataSourceId":{ + "shape":"ResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"DataSourceId" + }, + "Name":{"shape":"ResourceName"}, + "DataSourceParameters":{"shape":"DataSourceParameters"}, + "Credentials":{"shape":"DataSourceCredentials"}, + "VpcConnectionProperties":{"shape":"VpcConnectionProperties"}, + "SslProperties":{"shape":"SslProperties"} } }, - "ListGroupsResponse":{ + "UpdateDataSourceResponse":{ "type":"structure", "members":{ - "GroupList":{"shape":"GroupList"}, - "NextToken":{"shape":"String"}, + "Arn":{"shape":"Arn"}, + "DataSourceId":{"shape":"ResourceId"}, + "UpdateStatus":{"shape":"ResourceStatus"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -883,19 +5162,20 @@ } } }, - "ListUserGroupsRequest":{ + "UpdateGroupRequest":{ "type":"structure", "required":[ - "UserName", + "GroupName", "AwsAccountId", "Namespace" ], "members":{ - "UserName":{ - "shape":"UserName", + "GroupName":{ + "shape":"GroupName", "location":"uri", - "locationName":"UserName" + "locationName":"GroupName" }, + "Description":{"shape":"GroupDescription"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", @@ -905,25 +5185,13 @@ "shape":"Namespace", "location":"uri", 
"locationName":"Namespace" - }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" } } }, - "ListUserGroupsResponse":{ + "UpdateGroupResponse":{ "type":"structure", "members":{ - "GroupList":{"shape":"GroupList"}, - "NextToken":{"shape":"String"}, + "Group":{"shape":"Group"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -931,10 +5199,11 @@ } } }, - "ListUsersRequest":{ + "UpdateIAMPolicyAssignmentRequest":{ "type":"structure", "required":[ "AwsAccountId", + "AssignmentName", "Namespace" ], "members":{ @@ -943,29 +5212,29 @@ "location":"uri", "locationName":"AwsAccountId" }, - "NextToken":{ - "shape":"String", - "location":"querystring", - "locationName":"next-token" - }, - "MaxResults":{ - "shape":"MaxResults", - "box":true, - "location":"querystring", - "locationName":"max-results" + "AssignmentName":{ + "shape":"IAMPolicyAssignmentName", + "location":"uri", + "locationName":"AssignmentName" }, "Namespace":{ "shape":"Namespace", "location":"uri", "locationName":"Namespace" - } + }, + "AssignmentStatus":{"shape":"AssignmentStatus"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"} } }, - "ListUsersResponse":{ + "UpdateIAMPolicyAssignmentResponse":{ "type":"structure", "members":{ - "UserList":{"shape":"UserList"}, - "NextToken":{"shape":"String"}, + "AssignmentName":{"shape":"IAMPolicyAssignmentName"}, + "AssignmentId":{"shape":"String"}, + "PolicyArn":{"shape":"Arn"}, + "Identities":{"shape":"IdentityMap"}, + "AssignmentStatus":{"shape":"AssignmentStatus"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -973,66 +5242,77 @@ } } }, - "MaxResults":{ - "type":"integer", + "UpdateResourcePermissionList":{ + "type":"list", + "member":{"shape":"ResourcePermission"}, "max":100, "min":1 }, - "Namespace":{ - "type":"string", - "pattern":"default" - }, - "PreconditionNotMetException":{ + "UpdateTemplateAliasRequest":{ "type":"structure", + "required":[ + "AwsAccountId", + "TemplateId", + "AliasName", + "TemplateVersionNumber" + ], "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true + "AwsAccountId":{ + "shape":"AwsAccountId", + "location":"uri", + "locationName":"AwsAccountId" + }, + "TemplateId":{ + "shape":"RestrictiveResourceId", + "location":"uri", + "locationName":"TemplateId" + }, + "AliasName":{ + "shape":"AliasName", + "location":"uri", + "locationName":"AliasName" + }, + "TemplateVersionNumber":{"shape":"VersionNumber"} + } }, - "QuickSightUserNotFoundException":{ + "UpdateTemplateAliasResponse":{ "type":"structure", "members":{ - "Message":{"shape":"String"}, + "TemplateAlias":{"shape":"TemplateAlias"}, + "Status":{ + "shape":"StatusCode", + "location":"statusCode" + }, "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":404}, - "exception":true + } }, - "RegisterUserRequest":{ + "UpdateTemplatePermissionsRequest":{ "type":"structure", "required":[ - "IdentityType", - "Email", - "UserRole", "AwsAccountId", - "Namespace" + "TemplateId" ], "members":{ - "IdentityType":{"shape":"IdentityType"}, - "Email":{"shape":"String"}, - "UserRole":{"shape":"UserRole"}, - "IamArn":{"shape":"String"}, - "SessionName":{"shape":"RoleSessionName"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + 
"TemplateId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" + "locationName":"TemplateId" }, - "UserName":{"shape":"UserName"} + "GrantPermissions":{"shape":"UpdateResourcePermissionList"}, + "RevokePermissions":{"shape":"UpdateResourcePermissionList"} } }, - "RegisterUserResponse":{ + "UpdateTemplatePermissionsResponse":{ "type":"structure", "members":{ - "User":{"shape":"User"}, - "UserInvitationUrl":{"shape":"String"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "TemplateArn":{"shape":"Arn"}, + "Permissions":{"shape":"ResourcePermissionList"}, "RequestId":{"shape":"String"}, "Status":{ "shape":"StatusCode", @@ -1040,111 +5320,41 @@ } } }, - "ResourceExistsException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":409}, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":404}, - "exception":true - }, - "ResourceUnavailableException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "ResourceType":{"shape":"ExceptionResourceType"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":503}, - "exception":true - }, - "RoleSessionName":{ - "type":"string", - "max":64, - "min":2, - "pattern":"[\\w+=.@-]*" - }, - "SessionLifetimeInMinutes":{ - "type":"long", - "max":600, - "min":15 - }, - "SessionLifetimeInMinutesInvalidException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":400}, - "exception":true - }, - "StatusCode":{"type":"integer"}, - "String":{"type":"string"}, - "ThrottlingException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":429}, - "exception":true - }, - "UnsupportedUserEditionException":{ - "type":"structure", - "members":{ - "Message":{"shape":"String"}, - "RequestId":{"shape":"String"} - }, - "error":{"httpStatusCode":403}, - "exception":true - }, - "UpdateGroupRequest":{ + "UpdateTemplateRequest":{ "type":"structure", "required":[ - "GroupName", "AwsAccountId", - "Namespace" + "TemplateId", + "SourceEntity" ], "members":{ - "GroupName":{ - "shape":"GroupName", - "location":"uri", - "locationName":"GroupName" - }, - "Description":{"shape":"GroupDescription"}, "AwsAccountId":{ "shape":"AwsAccountId", "location":"uri", "locationName":"AwsAccountId" }, - "Namespace":{ - "shape":"Namespace", + "TemplateId":{ + "shape":"RestrictiveResourceId", "location":"uri", - "locationName":"Namespace" - } + "locationName":"TemplateId" + }, + "SourceEntity":{"shape":"TemplateSourceEntity"}, + "VersionDescription":{"shape":"VersionDescription"}, + "Name":{"shape":"TemplateName"} } }, - "UpdateGroupResponse":{ + "UpdateTemplateResponse":{ "type":"structure", "members":{ - "Group":{"shape":"Group"}, - "RequestId":{"shape":"String"}, + "TemplateId":{"shape":"RestrictiveResourceId"}, + "Arn":{"shape":"Arn"}, + "VersionArn":{"shape":"Arn"}, + "CreationStatus":{"shape":"ResourceStatus"}, "Status":{ "shape":"StatusCode", "location":"statusCode" - } + }, + "RequestId":{"shape":"String"} } }, "UpdateUserRequest":{ @@ -1187,6 +5397,22 @@ } } }, + "UploadSettings":{ + "type":"structure", + "members":{ + 
"Format":{"shape":"FileFormat"}, + "StartFromRow":{ + "shape":"PositiveInteger", + "box":true + }, + "ContainsHeader":{ + "shape":"Boolean", + "box":true + }, + "TextQualifier":{"shape":"TextQualifier"}, + "Delimiter":{"shape":"Delimiter"} + } + }, "User":{ "type":"structure", "members":{ @@ -1218,6 +5444,39 @@ "RESTRICTED_READER" ] }, - "boolean":{"type":"boolean"} + "Username":{ + "type":"string", + "max":64, + "min":1 + }, + "VersionDescription":{ + "type":"string", + "max":512, + "min":1 + }, + "VersionNumber":{ + "type":"long", + "min":1 + }, + "VpcConnectionProperties":{ + "type":"structure", + "required":["VpcConnectionArn"], + "members":{ + "VpcConnectionArn":{"shape":"Arn"} + } + }, + "Warehouse":{ + "type":"string", + "max":128 + }, + "WorkGroup":{ + "type":"string", + "max":128, + "min":1 + }, + "boolean":{"type":"boolean"}, + "long":{"type":"long"}, + "string":{"type":"string"}, + "timestamp":{"type":"timestamp"} } } diff --git a/models/apis/quicksight/2018-04-01/docs-2.json b/models/apis/quicksight/2018-04-01/docs-2.json index a1565f3929d..9ec2d684d1e 100644 --- a/models/apis/quicksight/2018-04-01/docs-2.json +++ b/models/apis/quicksight/2018-04-01/docs-2.json @@ -2,22 +2,71 @@ "version": "2.0", "service": "Amazon QuickSight API Reference

    Amazon QuickSight is a fully managed, serverless, cloud business intelligence service that makes it easy to extend data and insights to every user in your organization. This API interface reference contains documentation for a programming interface that you can use to manage Amazon QuickSight.

    ", "operations": { + "CancelIngestion": "

    Cancels an on-going ingestion of data into SPICE.

    ", + "CreateDashboard": "

    Creates a dashboard from a template. To first create a template, see the CreateTemplate API.

    A dashboard is an entity in QuickSight that identifies QuickSight reports, created from analyses. QuickSight dashboards are sharable. With the right permissions, you can create scheduled email reports from them. The CreateDashboard, DescribeDashboard, and ListDashboardsByUser APIs act on the dashboard entity. If you have the correct permissions, you can create a dashboard from a template that exists in a different AWS account.

    CLI syntax:

    aws quicksight create-dashboard --cli-input-json file://create-dashboard.json

    ", + "CreateDataSet": "

    Creates a dataset.

    CLI syntax:

    aws quicksight create-data-set \\

    --aws-account-id=111122223333 \\

    --data-set-id=unique-data-set-id \\

    --name='My dataset' \\

    --import-mode=SPICE \\

    --physical-table-map='{

    \"physical-table-id\": {

    \"RelationalTable\": {

    \"DataSourceArn\": \"arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id\",

    \"Name\": \"table1\",

    \"InputColumns\": [

    {

    \"Name\": \"column1\",

    \"Type\": \"STRING\"

    }

    ]

    }

    }'
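
    The same request can be made through the Go SDK in this repository. The following is a minimal sketch rather than a definitive implementation: it assumes the v2 SDK's request/Send calling pattern at this version and the generated quicksight types and constants (PhysicalTable, RelationalTable, InputColumn, DataSetImportModeSpice, InputColumnDataTypeString); the account ID, dataset ID, and data source ARN are the placeholder values from the CLI example above.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    // Load region and credentials from the default sources (env vars, shared config).
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)

    // Build the physical table map the same way the CLI example above does:
    // one relational table read from an existing data source (placeholder ARN).
    input := &quicksight.CreateDataSetInput{
        AwsAccountId: aws.String("111122223333"),
        DataSetId:    aws.String("unique-data-set-id"),
        Name:         aws.String("My dataset"),
        ImportMode:   quicksight.DataSetImportModeSpice,
        PhysicalTableMap: map[string]quicksight.PhysicalTable{
            "physical-table-id": {
                RelationalTable: &quicksight.RelationalTable{
                    DataSourceArn: aws.String("arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id"),
                    Name:          aws.String("table1"),
                    InputColumns: []quicksight.InputColumn{
                        {Name: aws.String("column1"), Type: quicksight.InputColumnDataTypeString},
                    },
                },
            },
        },
    }

    req := svc.CreateDataSetRequest(input)
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    log.Println("created data set:", resp)
}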

    ", + "CreateDataSource": "

    Creates a data source.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id

    CLI syntax:

    aws quicksight create-data-source \\

    --aws-account-id=111122223333 \\

    --data-source-id=unique-data-source-id \\

    --name='My Data Source' \\

    --type=POSTGRESQL \\

    --data-source-parameters='{ \"PostgreSqlParameters\": {

    \"Host\": \"my-db-host.example.com\",

    \"Port\": 1234,

    \"Database\": \"my-db\" } }' \\

    --credentials='{ \"CredentialPair\": {

    \"Username\": \"username\",

    \"Password\": \"password\" } }'

    ", "CreateGroup": "

    Creates an Amazon QuickSight group.

    The permissions resource is arn:aws:quicksight:us-east-1:<relevant-aws-account-id>:group/default/<group-name> .

    The response is a group object.

    CLI Sample:

    aws quicksight create-group --aws-account-id=111122223333 --namespace=default --group-name=\"Sales-Management\" --description=\"Sales Management - Forecasting\"

    ", "CreateGroupMembership": "

    Adds an Amazon QuickSight user to an Amazon QuickSight group.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name> .

    The condition resource is the user name.

    The condition key is quicksight:UserName.

    The response is the group member object.

    CLI Sample:

    aws quicksight create-group-membership --aws-account-id=111122223333 --namespace=default --group-name=Sales --member-name=Pat

    ", + "CreateIAMPolicyAssignment": "

    Creates an assignment with one specified IAM policy ARN that is assigned to specified groups or users of QuickSight. Users and groups need to be in the same namespace.

    CLI syntax:

    aws quicksight create-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=helpAssignment --policy-arn=arn:aws:iam::aws:policy/AdministratorAccess --identities=\"user=user5,engineer123,group=QS-Admin\" --namespace=default --region=us-west-2

    ", + "CreateIngestion": "

    Creates and starts a new SPICE ingestion on a dataset.

    Any ingestions operating on tagged datasets inherit the same tags automatically for use in access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using tags? Tags are visible on the tagged dataset, but not on the ingestion resource.

    ", + "CreateTemplate": "

    Creates a template from an existing QuickSight analysis or template. The resulting template can be used to create a dashboard.

    A template is an entity in QuickSight that encapsulates the metadata required to create an analysis, which in turn can be used to create a dashboard. It adds a layer of abstraction by using placeholders to replace the dataset associated with the analysis. You can use templates to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

    To create a template from an existing analysis, use the analysis's ARN, aws-account-id, template-id, source-entity, and data-set-references.

    CLI syntax to create a template:

    aws quicksight create-template --cli-input-json file://create-template.json

    CLI syntax to create a template from another template in the same AWS account:

    aws quicksight create-template --aws-account-id 111122223333 --template-id reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0dfc789c-81f6-4f4f-b9ac-7db2453eefc8 DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/f60da323-af68-45db-9016-08e0d1d7ded5 --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/7fb74527-c36d-4be8-8139-ac1be4c97365}'

    To create a template from another account's template, you need to grant cross-account resource permission for DescribeTemplate to the account that contains the template.

    You can use a file to pass JSON to the function if you prefer.

    ", + "CreateTemplateAlias": "

    Creates a template alias for a template.

    CLI syntax:

    aws quicksight create-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name PROD --version-number 1

    ", + "DeleteDashboard": "

    Deletes a dashboard.

    CLI syntax:

    aws quicksight delete-dashboard --aws-account-id 111122223333 --dashboard-id 123123123

    aws quicksight delete-dashboard --aws-account-id 111122223333 --dashboard-id 123123123 --version-number 3

    ", + "DeleteDataSet": "

    Deletes a dataset.

    CLI syntax:

    aws quicksight delete-data-set \\

    --aws-account-id=111111111111 \\

    --data-set-id=unique-data-set-id

    ", + "DeleteDataSource": "

    Deletes the data source permanently. This action breaks all the datasets that reference the deleted data source.

    CLI syntax:

    aws quicksight delete-data-source \\

    --aws-account-id=111122223333 \\

    --data-source-id=unique-data-source-id

    ", "DeleteGroup": "

    Removes a user group from Amazon QuickSight.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name> .

    CLI Sample:

    aws quicksight delete-group -\\-aws-account-id=111122223333 -\\-namespace=default -\\-group-name=Sales-Management

    ", "DeleteGroupMembership": "

    Removes a user from a group so that the user is no longer a member of the group.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name> .

    The condition resource is the user name.

    The condition key is quicksight:UserName.

    CLI Sample:

    aws quicksight delete-group-membership --aws-account-id=111122223333 --namespace=default --group-name=Sales-Management --member-name=Charlie

    ", - "DeleteUser": "

    Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    CLI Sample:

    aws quicksight delete-user --aws-account-id=111122223333 --namespace=default --user-name=Pat

    ", - "DeleteUserByPrincipalId": "

    Deletes a user identified by its principal ID.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    CLI Sample:

    aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 --namespace=default --principal-id=ABCDEFJA26JLI7EUUOEHS

    ", + "DeleteIAMPolicyAssignment": "

    Deletes an existing assignment.

    CLI syntax:

    aws quicksight delete-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=testtest --region=us-east-1 --namespace=default

    ", + "DeleteTemplate": "

    Deletes a template.

    CLI syntax:

    If the version number, which is an optional field, is not passed, the API deletes the template, including all of its versions. If a version number is provided, the API deletes only that specific template version.

    Users can explicitly describe the latest version of the template by passing $LATEST to the alias-name parameter. $LATEST is an internally supported alias, which points to the latest version of the template.

    ", + "DeleteTemplateAlias": "

    Deletes the template alias of the given template.

    CLI syntax:

    aws quicksight delete-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name 'STAGING'

    ", + "DeleteUser": "

    Deletes the Amazon QuickSight user that is associated with the identity of the AWS Identity and Access Management (IAM) user or role that's making the call. The IAM user isn't deleted as a result of this call.

    CLI Sample:

    aws quicksight delete-user --aws-account-id=111122223333 --namespace=default --user-name=Pat

    ", + "DeleteUserByPrincipalId": "

    Deletes a user identified by its principal ID.

    CLI Sample:

    aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 --namespace=default --principal-id=ABCDEFJA26JLI7EUUOEHS

    ", + "DescribeDashboard": "

    Provides a summary for a dashboard.

    CLI syntax:

    ", + "DescribeDashboardPermissions": "

    Describes read and write permissions on a dashboard.

    CLI syntax:

    aws quicksight describe-dashboard-permissions --aws-account-id 735340738645 --dashboard-id reports_test_bob_report

    ", + "DescribeDataSet": "

    Describes a dataset.

    CLI syntax:

    aws quicksight describe-data-set \\

    --aws-account-id=111111111111 \\

    --data-set-id=unique-data-set-id

    ", + "DescribeDataSetPermissions": "

    Describes the permissions on a dataset.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id

    CLI syntax:

    aws quicksight describe-data-set-permissions \\

    --aws-account-id=111122223333 \\

    --data-set-id=unique-data-set-id

    ", + "DescribeDataSource": "

    Describes a data source.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id

    ", + "DescribeDataSourcePermissions": "

    Describes the resource permissions for a data source.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id

    ", "DescribeGroup": "

    Returns an Amazon QuickSight group's description and Amazon Resource Name (ARN).

    The permissions resource is arn:aws:quicksight:us-east-1:<relevant-aws-account-id>:group/default/<group-name> .

    The response is the group object.

    CLI Sample:

    aws quicksight describe-group -\\-aws-account-id=11112222333 -\\-namespace=default -\\-group-name=Sales

    ", - "DescribeUser": "

    Returns information about a user, given the user name.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    The response is a user object that contains the user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.

    CLI Sample:

    aws quicksight describe-user --aws-account-id=111122223333 --namespace=default --user-name=Pat

    ", - "GetDashboardEmbedUrl": "

    Generates a server-side embeddable URL and authorization code. Before this can work properly, first you need to configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards.

    Currently, you can use GetDashboardEmbedURL only from the server, not from the user’s browser.

    CLI Sample:

    Assume the role with permissions enabled for actions: quickSight:RegisterUser and quicksight:GetDashboardEmbedURL. You can use assume-role, assume-role-with-web-identity, or assume-role-with-saml.

    aws sts assume-role --role-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --role-session-name embeddingsession

    If the user does not exist in QuickSight, register the user:

    aws quicksight register-user --aws-account-id 111122223333 --namespace default --identity-type IAM --iam-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --user-role READER --session-name \"embeddingsession\" --email user123@example.com --region us-east-1

    Get the URL for the embedded dashboard

    aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type IAM

    ", + "DescribeIAMPolicyAssignment": "

    Describes an existing IAM policy assignment, as specified by the assignment name.

    CLI syntax:

    aws quicksight describe-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=testtest --namespace=default --region=us-east-1

    ", + "DescribeIngestion": "

    Describes a SPICE ingestion.

    ", + "DescribeTemplate": "

    Describes a template's metadata.

    CLI syntax:

    aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template

    aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template --version-number 2

    aws quicksight describe-template --aws-account-id 111122223333 --template-id reports_test_template --alias-name '\\$LATEST'

    Users can explicitly describe the latest version of the template by passing $LATEST to the alias-name parameter. $LATEST is an internally supported alias, which points to the latest version of the template.

    ", + "DescribeTemplateAlias": "

    Describes the template aliases of a template.

    CLI syntax:

    aws quicksight describe-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name 'STAGING'

    ", + "DescribeTemplatePermissions": "

    Describes read and write permissions on a template.

    CLI syntax:

    aws quicksight describe-template-permissions --aws-account-id 735340738645 --template-id reports_test_template

    ", + "DescribeUser": "

    Returns information about a user, given the user name.

    The response is a user object that contains the user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.

    CLI Sample:

    aws quicksight describe-user --aws-account-id=111122223333 --namespace=default --user-name=Pat

    ", + "GetDashboardEmbedUrl": "

    Generates a server-side embeddable URL and authorization code. Before this can work properly, first you need to configure the dashboards and user permissions. For more information, see Embedding Amazon QuickSight Dashboards.

    Currently, you can use GetDashboardEmbedURL only from the server, not from the user’s browser.

    CLI Sample:

    Assume the role with permissions enabled for actions: quickSight:RegisterUser and quicksight:GetDashboardEmbedURL. You can use assume-role, assume-role-with-web-identity, or assume-role-with-saml.

    aws sts assume-role --role-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --role-session-name embeddingsession

    If the user does not exist in QuickSight, register the user:

    aws quicksight register-user --aws-account-id 111122223333 --namespace default --identity-type IAM --iam-arn \"arn:aws:iam::111122223333:role/embedding_quicksight_dashboard_role\" --user-role READER --session-name \"embeddingsession\" --email user123@example.com --region us-east-1

    Get the URL for the embedded dashboard (IAM identity authentication):

    aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type IAM

    Get the URL for the embedded dashboard (QUICKSIGHT identity authentication):

    aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type QUICKSIGHT --user-arn arn:aws:quicksight:us-east-1:111122223333:user/default/embedding_quicksight_dashboard_role/embeddingsession
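
    For server-side callers using the Go SDK in this repository instead of the CLI, the same call looks roughly like the sketch below. It assumes the v2 SDK's request/Send pattern and the generated quicksight.IdentityTypeIam constant; the account and dashboard IDs are the placeholders from the examples above, and credentials are expected to come from the assumed embedding role.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    // Credentials and region come from the environment of the assumed embedding role.
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)

    // Request a one-time embeddable URL for the dashboard, using IAM identity auth.
    req := svc.GetDashboardEmbedUrlRequest(&quicksight.GetDashboardEmbedUrlInput{
        AwsAccountId:             aws.String("111122223333"),
        DashboardId:              aws.String("1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89"),
        IdentityType:             quicksight.IdentityTypeIam,
        SessionLifetimeInMinutes: aws.Int64(60),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    // The returned URL is what the server hands to the browser for embedding.
    fmt.Println(*resp.EmbedUrl)
}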

    ", + "ListDashboardVersions": "

    Lists all the versions of the dashboards in the QuickSight subscription.

    CLI syntax:

    aws quicksight list-dashboard-versions --aws-account-id 111122223333 --dashboard-id 123123123

    ", + "ListDashboards": "

    Lists dashboards in the AWS account.

    CLI syntax:

    aws quicksight list-dashboards --aws-account-id 111122223333 --max-results 5 --next-token 'next-10'

    ", + "ListDataSets": "

    Lists all of the datasets belonging to this account in an AWS region.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*

    CLI syntax: aws quicksight list-data-sets --aws-account-id=111111111111

    ", + "ListDataSources": "

    Lists data sources in the current AWS Region that belong to this AWS account.

    The permissions resource is: arn:aws:quicksight:region:aws-account-id:datasource/*

    CLI syntax: aws quicksight list-data-sources --aws-account-id=111122223333

    ", "ListGroupMemberships": "

    Lists member users in a group.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name> .

    The response is a list of group member objects.

    CLI Sample:

    aws quicksight list-group-memberships -\\-aws-account-id=111122223333 -\\-namespace=default

    ", "ListGroups": "

    Lists all user groups in Amazon QuickSight.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/*.

    The response is a list of group objects.

    CLI Sample:

    aws quicksight list-groups -\\-aws-account-id=111122223333 -\\-namespace=default

    ", - "ListUserGroups": "

    Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    The response is a one or more group objects.

    CLI Sample:

    aws quicksight list-user-groups -\\-user-name=Pat -\\-aws-account-id=111122223333 -\\-namespace=default -\\-region=us-east-1

    ", - "ListUsers": "

    Returns a list of all of the Amazon QuickSight users belonging to this account.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/* .

    The response is a list of user objects, containing each user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.

    CLI Sample:

    aws quicksight list-users --aws-account-id=111122223333 --namespace=default

    ", - "RegisterUser": "

    Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    The condition resource is the Amazon Resource Name (ARN) for the IAM user or role, and the session name.

    The condition keys are quicksight:IamArn and quicksight:SessionName.

    CLI Sample:

    aws quicksight register-user -\\-aws-account-id=111122223333 -\\-namespace=default -\\-email=pat@example.com -\\-identity-type=IAM -\\-user-role=AUTHOR -\\-iam-arn=arn:aws:iam::111122223333:user/Pat

    ", + "ListIAMPolicyAssignments": "

    Lists assignments in the current QuickSight account.

    CLI syntax:

    aws quicksight list-iam-policy-assignments --aws-account-id=111122223333 --max-result=5 --assignment-status=ENABLED --namespace=default --region=us-east-1 --next-token=3

    ", + "ListIAMPolicyAssignmentsForUser": "

    Lists all the assignments and the ARNs for the associated IAM policies assigned to the specified user and the group or groups that the user belongs to.

    CLI syntax:

    aws quicksight list-iam-policy-assignments-for-user --aws-account-id=111122223333 --user-name=user5 --namespace=default --max-result=6 --region=us-east-1

    ", + "ListIngestions": "

    Lists the history of SPICE ingestions for a dataset.

    ", + "ListTagsForResource": "

    Lists the tags assigned to a resource.

    CLI syntax:

    ", + "ListTemplateAliases": "

    Lists all the aliases of a template.

    CLI syntax:

    aws quicksight list-template-aliases --aws-account-id 111122223333 --template-id 'reports_test_template'

    ", + "ListTemplateVersions": "

    Lists all the versions of the templates in the QuickSight account.

    CLI syntax:

    aws quicksight list-template-versions --aws-account-id 111122223333 --template-id reports-test-template

    ", + "ListTemplates": "

    Lists all the templates in the QuickSight account.

    CLI syntax:

    aws quicksight list-templates --aws-account-id 111122223333 --max-results 1 --next-token AYADeJuxwOypAndSoOn

    ", + "ListUserGroups": "

    Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member of.

    The response is one or more group objects.

    CLI Sample:

    aws quicksight list-user-groups -\\-user-name=Pat -\\-aws-account-id=111122223333 -\\-namespace=default -\\-region=us-east-1

    ", + "ListUsers": "

    Returns a list of all of the Amazon QuickSight users belonging to this account.

    The response is a list of user objects, containing each user's Amazon Resource Name (ARN), AWS Identity and Access Management (IAM) role, and email address.

    CLI Sample:

    aws quicksight list-users --aws-account-id=111122223333 --namespace=default
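
    A rough equivalent with the Go SDK in this repository, assuming the v2 SDK's request/Send pattern; it pages through results with the MaxResults and NextToken members defined on the ListUsersRequest shape earlier in this diff.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)

    input := &quicksight.ListUsersInput{
        AwsAccountId: aws.String("111122223333"),
        Namespace:    aws.String("default"),
        MaxResults:   aws.Int64(50),
    }
    // Page through the user list until NextToken comes back empty.
    for {
        resp, err := svc.ListUsersRequest(input).Send(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        for _, u := range resp.UserList {
            // Each user object carries its name and ARN, among other fields.
            fmt.Println(*u.UserName, *u.Arn)
        }
        if resp.NextToken == nil {
            break
        }
        input.NextToken = resp.NextToken
    }
}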

    ", + "RegisterUser": "

    Creates an Amazon QuickSight user, whose identity is associated with the AWS Identity and Access Management (IAM) identity or role specified in the request.

    CLI Sample:

    aws quicksight register-user -\\-aws-account-id=111122223333 -\\-namespace=default -\\-email=pat@example.com -\\-identity-type=IAM -\\-user-role=AUTHOR -\\-iam-arn=arn:aws:iam::111122223333:user/Pat
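
    A minimal Go sketch of the same registration through this SDK, assuming the request/Send pattern and the generated quicksight.IdentityTypeIam and quicksight.UserRoleAuthor constants; the field names follow the RegisterUserRequest shape earlier in this diff, and the values mirror the CLI sample above.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)

    // Register an IAM-backed author, mirroring the CLI sample above (placeholder values).
    req := svc.RegisterUserRequest(&quicksight.RegisterUserInput{
        AwsAccountId: aws.String("111122223333"),
        Namespace:    aws.String("default"),
        Email:        aws.String("pat@example.com"),
        IdentityType: quicksight.IdentityTypeIam,
        UserRole:     quicksight.UserRoleAuthor,
        IamArn:       aws.String("arn:aws:iam::111122223333:user/Pat"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    log.Println("registered:", resp)
}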

    ", + "TagResource": "

    Assigns a tag or tags to a resource.

    Assigns one or more tags (key-value pairs) to the specified QuickSight resource. Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with certain tag values. You can use the TagResource action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

    You can associate as many as 50 tags with a resource. QuickSight supports tagging on data set, data source, dashboard, and template resources.

    Tagging for QuickSight works in a similar way to tagging for other AWS services, except for the following:

    CLI syntax to tag a resource:

    ", + "UntagResource": "

    Removes a tag or tags from a resource.
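
    With the Go SDK in this repository, tagging and then untagging a resource looks roughly like the sketch below, assuming the request/Send pattern and the ResourceArn, Tags, and TagKeys members from the TagResourceRequest and UntagResourceRequest shapes earlier in this diff; the dashboard ARN and tag key are placeholders.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)
    arn := aws.String("arn:aws:quicksight:us-east-1:111122223333:dashboard/reports_test_report")

    // Add (or overwrite) a key-value tag on the dashboard.
    _, err = svc.TagResourceRequest(&quicksight.TagResourceInput{
        ResourceArn: arn,
        Tags: []quicksight.Tag{
            {Key: aws.String("team"), Value: aws.String("analytics")},
        },
    }).Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // Remove the same tag again by key.
    _, err = svc.UntagResourceRequest(&quicksight.UntagResourceInput{
        ResourceArn: arn,
        TagKeys:     []string{"team"},
    }).Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
}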

    CLI syntax:

    ", + "UpdateDashboard": "

    Updates a dashboard in the AWS account.

    CLI syntax:

    aws quicksight update-dashboard --aws-account-id 111122223333 --dashboard-id 123123123 --dashboard-name \"test-update102\" --source-entity SourceTemplate={Arn=arn:aws:quicksight:us-west-2:111122223333:template/sales-report-template2} --data-set-references DataSetPlaceholder=SalesDataSet,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0e251aef-9ebf-46e1-b852-eb4fa33c1d3a

    aws quicksight update-dashboard --cli-input-json file://update-dashboard.json

    ", + "UpdateDashboardPermissions": "

    Updates read and write permissions on a dashboard.

    CLI syntax:

    aws quicksight update-dashboard-permissions --cli-input-json file://update-permissions.json

    A sample update-permissions.json for granting read only permissions:

    { \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"GrantPermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\" ] } ] }

    A sample update-permissions.json for granting read and write permissions:

    { \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"GrantPermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\", \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }

    A sample update-permissions.json for revoking write permissions:

    { \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"RevokePermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }

    A sample update-permissions.json for revoking read and write permissions:

    { \"AwsAccountId\": \"111122223333\", \"DashboardId\": \"reports_test_report\", \"RevokePermissions\": [ { \"Principal\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Actions\": [ \"quicksight:DescribeDashboard\", \"quicksight:ListDashboardVersions\", \"quicksight:DescribeDashboardVersion\", \"quicksight:QueryDashboard\", \"quicksight:DescribeDashboardPermissions\", \"quicksight:UpdateDashboardPermissions\", \"quicksight:DeleteDashboardVersion\", \"quicksight:DeleteDashboard\", \"quicksight:UpdateDashboard\", \"quicksight:UpdateDashboardPublishedVersion\", ] } ] }

    To obtain the principal name of a QuickSight user or group, you can use describe-group or describe-user. For example:

    aws quicksight describe-user --aws-account-id 111122223333 --namespace default --user-name user2 --region us-east-1 { \"User\": { \"Arn\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\", \"Active\": true, \"Email\": \"user2@example.com\", \"Role\": \"ADMIN\", \"UserName\": \"user2\", \"PrincipalId\": \"federated/iam/abcd2abcdabcdeabc5ab5\" }, \"RequestId\": \"8f74bb31-6291-448a-a71c-a765a44bae31\", \"Status\": 200 }

    ", + "UpdateDashboardPublishedVersion": "

    Updates the published version of a dashboard.

    CLI syntax:

    aws quicksight update-dashboard-published-version --aws-account-id 111122223333 --dashboard-id dashboard-w1 --version-number 2

    ", + "UpdateDataSet": "

    Updates a dataset.

    CLI syntax:

    aws quicksight update-data-set \\

    --aws-account-id=111122223333 \\

    --data-set-id=unique-data-set-id \\

    --name='My dataset' \\

    --import-mode=SPICE \\

    --physical-table-map='{

    \"physical-table-id\": {

    \"RelationalTable\": {

    \"DataSourceArn\": \"arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id\",

    \"Name\": \"table1\",

    \"InputColumns\": [

    {

    \"Name\": \"column1\",

    \"Type\": \"STRING\"

    }

    ]

    }

    }

    }'

    ", + "UpdateDataSetPermissions": "

    Updates the permissions on a dataset.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id

    CLI syntax:

    aws quicksight update-data-set-permissions \\

    --aws-account-id=111122223333 \\

    --data-set-id=unique-data-set-id \\

    --grant-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user1\",\"Actions\":[\"quicksight:DescribeDataSet\",\"quicksight:DescribeDataSetPermissions\",\"quicksight:PassDataSet\",\"quicksight:ListIngestions\",\"quicksight:DescribeIngestion\"]}]' \\

    --revoke-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",\"Actions\":[\"quicksight:UpdateDataSet\",\"quicksight:DeleteDataSet\",\"quicksight:UpdateDataSetPermissions\",\"quicksight:CreateIngestion\",\"quicksight:CancelIngestion\"]}]'

    ", + "UpdateDataSource": "

    Updates a data source.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id

    CLI syntax:

    aws quicksight update-data-source \\

    --aws-account-id=111122223333 \\

    --data-source-id=unique-data-source-id \\

    --name='My Data Source' \\

    --data-source-parameters='{\"PostgreSqlParameters\":{\"Host\":\"my-db-host.example.com\",\"Port\":1234,\"Database\":\"my-db\"}}' \\

    --credentials='{\"CredentialPair\":{\"Username\":\"username\",\"Password\":\"password\"}}'

    ", + "UpdateDataSourcePermissions": "

    Updates the permissions on a data source.

    The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id

    CLI syntax:

    aws quicksight update-data-source-permissions \\

    --aws-account-id=111122223333 \\

    --data-source-id=unique-data-source-id \\

    --grant-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user1\",\"Actions\":[\"quicksight:DescribeDataSource\",\"quicksight:DescribeDataSourcePermissions\",\"quicksight:PassDataSource\"]}]' \\

    --revoke-permissions='[{\"Principal\":\"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",\"Actions\":[\"quicksight:UpdateDataSource\",\"quicksight:DeleteDataSource\",\"quicksight:UpdateDataSourcePermissions\"]}]'

    ", "UpdateGroup": "

    Changes a group description.

    The permissions resource is arn:aws:quicksight:us-east-1:<aws-account-id>:group/default/<group-name> .

    The response is a group object.

    CLI Sample:

    aws quicksight update-group --aws-account-id=111122223333 --namespace=default --group-name=Sales --description=\"Sales BI Dashboards\"

    ", - "UpdateUser": "

    Updates an Amazon QuickSight user.

    The permission resource is arn:aws:quicksight:us-east-1:<aws-account-id>:user/default/<user-name> .

    The response is a user object that contains the user's Amazon QuickSight user name, email address, active or inactive status in Amazon QuickSight, Amazon QuickSight role, and Amazon Resource Name (ARN).

    CLI Sample:

    aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@amazon.com --aws-account-id=111122223333 --namespace=default --region=us-east-1

    " + "UpdateIAMPolicyAssignment": "

    Updates an existing assignment. This operation updates only the optional parameter or parameters that are specified in the request.

    CLI syntax:

    aws quicksight update-iam-policy-assignment --aws-account-id=111122223333 --assignment-name=FullAccessAssignment --assignment-status=DRAFT --policy-arns=arn:aws:iam::aws:policy/AdministratorAccess --identities=\"user=user-1,user-2,group=admin\" --namespace=default --region=us-east-1

    ", + "UpdateTemplate": "

    Updates a template from an existing QuickSight analysis.

    CLI syntax:

    aws quicksight update-template --aws-account-id 111122223333 --template-id reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/c684a204-d134-4c53-a63c-451f72c60c28 DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/15840b7d-b542-4491-937b-602416b367b3 --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/c5731fe9-4708-4598-8f6d-cf2a70875b6d}'

    You can also pass in a JSON file: aws quicksight update-template --cli-input-json file://create-template.json

    ", + "UpdateTemplateAlias": "

    Updates the template alias of a template.

    CLI syntax:

    aws quicksight update-template-alias --aws-account-id 111122223333 --template-id 'reports_test_template' --alias-name STAGING --template-version-number 2

    ", + "UpdateTemplatePermissions": "

    Updates the permissions on a template.

    CLI syntax:

    • aws quicksight describe-template-permissions --aws-account-id 111122223333 --template-id reports_test_template

    • aws quicksight update-template-permissions --cli-input-json file://update-permissions.json

    • The structure of update-permissions.json to add permissions:

      { \"AwsAccountId\": \"111122223333\",

      \"DashboardId\": \"reports_test_template\",

      \"GrantPermissions\": [

      { \"Principal\": \"arn:aws:quicksight:us-east-1:196359894473:user/default/user3\",

      \"Actions\": [

      \"quicksight:DescribeTemplate\",

      \"quicksight:ListTemplateVersions\"

      ] } ] }

      The structure of update-permissions.json to revoke permissions:

      { \"AwsAccountId\": \"111122223333\",

      \"DashboardId\": \"reports_test_template\",

      \"RevokePermissions\": [

      { \"Principal\": \"arn:aws:quicksight:us-east-1:196359894473:user/default/user3\",

      \"Actions\": [

      \"quicksight:DescribeTemplate\",

      \"quicksight:ListTemplateVersions\"

      ] } ] }

      To obtain the principal name of a QuickSight group or user, use describe-group or describe-user. For example:

      aws quicksight describe-user

      --aws-account-id 111122223333

      --namespace default

      --user-name user2

      --region us-east-1

      {

      \"User\": {

      \"Arn\": \"arn:aws:quicksight:us-east-1:111122223333:user/default/user2\",

      \"Active\": true,

      \"Email\": \"user2@example.com\",

      \"Role\": \"ADMIN\",

      \"UserName\": \"user2\",

      \"PrincipalId\": \"federated/iam/abcd2abcdabcdeabc5ab5\"

      },

      \"RequestId\": \"8f74bb31-6291-448a-a71c-a765a44bae31\",

      \"Status\": 200

      }

    ", + "UpdateUser": "

    Updates an Amazon QuickSight user.

    The response is a user object that contains the user's Amazon QuickSight user name, email address, active or inactive status in Amazon QuickSight, Amazon QuickSight role, and Amazon Resource Name (ARN).

    CLI Sample:

    aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@example.com --aws-account-id=111122223333 --namespace=default --region=us-east-1

    " }, "shapes": { "AccessDeniedException": { @@ -25,383 +74,2121 @@ "refs": { } }, + "ActionList": { + "base": null, + "refs": { + "ResourcePermission$Actions": "

    The action to grant or revoke permissions on. For example, \"quicksight:DescribeDashboard\".

    " + } + }, + "ActiveIAMPolicyAssignment": { + "base": "

    The active IAM policy assignment.

    ", + "refs": { + "ActiveIAMPolicyAssignmentList$member": null + } + }, + "ActiveIAMPolicyAssignmentList": { + "base": null, + "refs": { + "ListIAMPolicyAssignmentsForUserResponse$ActiveAssignments": "

    Active assignments for this user.

    " + } + }, + "AdHocFilteringOption": { + "base": "

    Ad hoc filtering option.

    ", + "refs": { + "DashboardPublishOptions$AdHocFilteringOption": "

    Ad hoc filtering option.

    " + } + }, + "AliasName": { + "base": null, + "refs": { + "CreateTemplateAliasRequest$AliasName": "

    The name you want to give the template's alias. Alias names can't begin with a $ character, which is reserved by QuickSight; aliases that start with $ can't be deleted.

    ", + "DeleteTemplateAliasRequest$AliasName": "

    The alias of the template. If alias-name is provided, the version that the alias-name points to is deleted. Alias names that start with $ are reserved by QuickSight and can't be deleted.

    ", + "DeleteTemplateAliasResponse$AliasName": "

    The name of the alias.

    ", + "DescribeDashboardRequest$AliasName": "

    The alias name.

    ", + "DescribeTemplateAliasRequest$AliasName": "

    The alias name. $PUBLISHED is not supported for template.

    ", + "DescribeTemplateRequest$AliasName": "

    This is an optional field. When an alias name is provided, the version referenced by the alias is described. Refer to CreateTemplateAlias to create a template alias. $PUBLISHED is not supported for template.

    ", + "TemplateAlias$AliasName": "

    The display name of the template alias.

    ", + "UpdateTemplateAliasRequest$AliasName": "

    The alias name.

    " + } + }, + "AmazonElasticsearchParameters": { + "base": "

    Amazon Elasticsearch parameters.

    ", + "refs": { + "DataSourceParameters$AmazonElasticsearchParameters": "

    Amazon Elasticsearch parameters.

    " + } + }, "Arn": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$UserArn": "

    The Amazon QuickSight user's ARN, for use with QUICKSIGHT identity type. You can use this for any of the following:

    • Amazon QuickSight users in your account (readers, authors, or admins)

    • AD users

    • Invited non-federated users

    • Federated IAM users

    • Federated IAM role-based sessions

    ", - "Group$Arn": "

    The Amazon Resource Name (ARN) for the group.

    ", - "GroupMember$Arn": "

    The Amazon Resource Name (ARN) for the group member (user).

    ", - "User$Arn": "

    The Amazon Resource Name (ARN) for the user.

    " + "ActiveIAMPolicyAssignment$PolicyArn": "

    The ARN of the resource.

    ", + "CancelIngestionResponse$Arn": "

    The Amazon Resource Name (ARN) for the data ingestion.

    ", + "CreateDashboardResponse$Arn": "

    The ARN of the dashboard.

    ", + "CreateDashboardResponse$VersionArn": "

    The ARN of the dashboard, including the version number of the first version that is created.

    ", + "CreateDataSetResponse$Arn": "

    The ARN of the dataset.

    ", + "CreateDataSetResponse$IngestionArn": "

    The Amazon Resource Name (ARN) for the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.

    ", + "CreateDataSourceResponse$Arn": "

    The ARN of the data source.

    ", + "CreateIAMPolicyAssignmentRequest$PolicyArn": "

    An IAM policy ARN that you want to apply to the QuickSight users and groups specified in this assignment.

    ", + "CreateIAMPolicyAssignmentResponse$PolicyArn": "

    An IAM policy ARN that is applied to the QuickSight users and groups specified in this assignment.

    ", + "CreateIngestionResponse$Arn": "

    The Amazon Resource Name (ARN) for the data ingestion.

    ", + "CreateTemplateResponse$Arn": "

    The Amazon Resource Name (ARN) for the template.

    ", + "CreateTemplateResponse$VersionArn": "

    The Amazon Resource Name (ARN) for the template, including the version information of the first version.

    ", + "CustomSql$DataSourceArn": "

    The ARN of the data source.

    ", + "Dashboard$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DashboardSourceTemplate$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DashboardSummary$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DashboardVersion$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DashboardVersion$SourceEntityArn": "

    Source entity ARN.

    ", + "DashboardVersionSummary$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DashboardVersionSummary$SourceEntityArn": "

    Source entity ARN.

    ", + "DataSet$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "DataSetReference$DataSetArn": "

    Dataset ARN.

    ", + "DataSetSummary$Arn": "

    The Amazon Resource name (ARN) of the dataset.

    ", + "DataSource$Arn": "

    The Amazon Resource name (ARN) of the data source.

    ", + "DeleteDashboardResponse$Arn": "

    The ARN of the resource.

    ", + "DeleteDataSetResponse$Arn": "

    The ARN of the dataset.

    ", + "DeleteDataSourceResponse$Arn": "

    The ARN of the data source you deleted.

    ", + "DeleteTemplateAliasResponse$Arn": "

    The ARN of the resource.

    ", + "DeleteTemplateResponse$Arn": "

    The ARN of the resource.

    ", + "DescribeDashboardPermissionsResponse$DashboardArn": "

    The ARN of the dashboard.

    ", + "DescribeDataSetPermissionsResponse$DataSetArn": "

    The ARN of the dataset.

    ", + "DescribeDataSourcePermissionsResponse$DataSourceArn": "

    The ARN of the data source.

    ", + "DescribeTemplatePermissionsResponse$TemplateArn": "

    The ARN of the template.

    ", + "GetDashboardEmbedUrlRequest$UserArn": "

    The Amazon QuickSight user's ARN, for use with QUICKSIGHT identity type. You can use this for any Amazon QuickSight users in your account (readers, authors, or admins) authenticated as one of the following:

    • Active Directory (AD) users or group members

    • Invited non-federated users

    • IAM users and IAM role-based sessions authenticated through Federated Single Sign-On using SAML, OpenID Connect, or IAM Federation

    ", + "Group$Arn": "

    The Amazon Resource name (ARN) for the group.

    ", + "GroupMember$Arn": "

    The Amazon Resource name (ARN) for the group member (user).

    ", + "IAMPolicyAssignment$PolicyArn": "

    Policy ARN.

    ", + "Ingestion$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "ListTagsForResourceRequest$ResourceArn": "

    The ARN of the resource you want a list of tags for.

    ", + "RelationalTable$DataSourceArn": "

    Data source ARN.

    ", + "RowLevelPermissionDataSet$Arn": "

    The Amazon Resource name (ARN) of the permission dataset.

    ", + "S3Source$DataSourceArn": "

    Data source ARN.

    ", + "TagResourceRequest$ResourceArn": "

    The ARN of the resource you want to tag.

    ", + "Template$Arn": "

    The ARN of the template.

    ", + "TemplateAlias$Arn": "

    The ARN of the template alias.

    ", + "TemplateSourceAnalysis$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "TemplateSourceTemplate$Arn": "

    The Amazon Resource name (ARN) of the resource.

    ", + "TemplateSummary$Arn": "

    The Amazon Resource Name (ARN) of the template.

    ", + "TemplateVersion$SourceEntityArn": "

    The ARN of the analysis or template which was used to create this template.

    ", + "TemplateVersionSummary$Arn": "

    The ARN of the template version.

    ", + "UntagResourceRequest$ResourceArn": "

    The ARN of the resource that you want to untag.

    ", + "UpdateDashboardPermissionsResponse$DashboardArn": "

    The ARN of the dashboard.

    ", + "UpdateDashboardPublishedVersionResponse$DashboardArn": "

    The ARN of the dashboard.

    ", + "UpdateDashboardResponse$Arn": "

    The ARN of the resource.

    ", + "UpdateDashboardResponse$VersionArn": "

    The ARN of the dashboard, including the version number.

    ", + "UpdateDataSetPermissionsResponse$DataSetArn": "

    The ARN of the dataset.

    ", + "UpdateDataSetResponse$Arn": "

    The ARN of the dataset.

    ", + "UpdateDataSetResponse$IngestionArn": "

    The Amazon Resource Name (ARN) for the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.

    ", + "UpdateDataSourcePermissionsResponse$DataSourceArn": "

    The ARN of the data source.

    ", + "UpdateDataSourceResponse$Arn": "

    The ARN of the data source.

    ", + "UpdateIAMPolicyAssignmentRequest$PolicyArn": "

    An IAM policy ARN that will be applied to the QuickSight users and groups specified in this assignment.

    ", + "UpdateIAMPolicyAssignmentResponse$PolicyArn": "

    The IAM policy ARN assigned to the QuickSight users and groups specified in this request.

    ", + "UpdateTemplatePermissionsResponse$TemplateArn": "

    The ARN of the template.

    ", + "UpdateTemplateResponse$Arn": "

    The Amazon Resource Name (ARN) for the template.

    ", + "UpdateTemplateResponse$VersionArn": "

    The Amazon Resource Name (ARN) for the template, including the version information of the first version.

    ", + "User$Arn": "

    The Amazon Resource name (ARN) for the user.

    ", + "VpcConnectionProperties$VpcConnectionArn": "

    VPC connection ARN.

    " + } + }, + "AssignmentStatus": { + "base": null, + "refs": { + "CreateIAMPolicyAssignmentRequest$AssignmentStatus": "

    The status of an assignment:

    • ENABLED - Anything specified in this assignment is used while creating the data source.

    • DISABLED - This assignment isn't used while creating the data source.

    • DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.

    ", + "CreateIAMPolicyAssignmentResponse$AssignmentStatus": "

    The status of an assignment:

    • ENABLED - Anything specified in this assignment is used while creating the data source.

    • DISABLED - This assignment isn't used while creating the data source.

    • DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.

    ", + "IAMPolicyAssignment$AssignmentStatus": "

    Assignment status.

    ", + "IAMPolicyAssignmentSummary$AssignmentStatus": "

    Assignment status.

    ", + "ListIAMPolicyAssignmentsRequest$AssignmentStatus": "

    The status of the assignment.

    ", + "UpdateIAMPolicyAssignmentRequest$AssignmentStatus": "

    The status of an assignment:

    • ENABLED - Anything specified in this assignment is used while creating the data source.

    • DISABLED - This assignment isn't used while creating the data source.

    • DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.

    ", + "UpdateIAMPolicyAssignmentResponse$AssignmentStatus": "

    The status of the assignment:

    • ENABLED - Anything specified in this assignment is used while creating the data source.

    • DISABLED - This assignment isn't used while creating the data source.

    • DRAFT - Assignment is an unfinished draft and isn't used while creating the data source.

    " + } + }, + "AthenaParameters": { + "base": "

    Athena parameters.

    ", + "refs": { + "DataSourceParameters$AthenaParameters": "

    Athena parameters.

    " + } + }, + "AuroraParameters": { + "base": "

    Aurora parameters.

    ", + "refs": { + "DataSourceParameters$AuroraParameters": "

    Aurora MySQL parameters.

    " + } + }, + "AuroraPostgreSqlParameters": { + "base": "

    Aurora PostgreSQL parameters.

    ", + "refs": { + "DataSourceParameters$AuroraPostgreSqlParameters": "

    Aurora PostgreSQL parameters.

    " } }, "AwsAccountId": { "base": null, "refs": { + "CancelIngestionRequest$AwsAccountId": "

    The AWS account ID.

    ", + "CreateDashboardRequest$AwsAccountId": "

    AWS account ID where you want to create the dashboard.

    ", + "CreateDataSetRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "CreateDataSourceRequest$AwsAccountId": "

    The AWS account ID.

    ", "CreateGroupMembershipRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "CreateGroupRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "CreateIAMPolicyAssignmentRequest$AwsAccountId": "

    The AWS Account ID where you want to assign QuickSight users or groups to an IAM policy.

    ", + "CreateIngestionRequest$AwsAccountId": "

    The AWS account ID.

    ", + "CreateTemplateAliasRequest$AwsAccountId": "

    AWS account ID that contains the template you are aliasing.

    ", + "CreateTemplateRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "DeleteDashboardRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are deleting.

    ", + "DeleteDataSetRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "DeleteDataSourceRequest$AwsAccountId": "

    The AWS account ID.

    ", "DeleteGroupMembershipRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "DeleteGroupRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "DeleteIAMPolicyAssignmentRequest$AwsAccountId": "

    The AWS account ID where you want to delete an IAM policy assignment.

    ", + "DeleteTemplateAliasRequest$AwsAccountId": "

    AWS account ID that contains the template alias you are deleting.

    ", + "DeleteTemplateRequest$AwsAccountId": "

    AWS account ID that contains the template you are deleting.

    ", "DeleteUserByPrincipalIdRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "DeleteUserRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "DescribeDashboardPermissionsRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are describing permissions of.

    ", + "DescribeDashboardRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are describing.

    ", + "DescribeDataSetPermissionsRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "DescribeDataSetRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "DescribeDataSourcePermissionsRequest$AwsAccountId": "

    The AWS account ID.

    ", + "DescribeDataSourceRequest$AwsAccountId": "

    The AWS account ID.

    ", "DescribeGroupRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "DescribeIAMPolicyAssignmentRequest$AwsAccountId": "

    The AWS account ID that contains the assignment you want to describe.

    ", + "DescribeIngestionRequest$AwsAccountId": "

    The AWS account ID.

    ", + "DescribeTemplateAliasRequest$AwsAccountId": "

    AWS account ID that contains the template alias you are describing.

    ", + "DescribeTemplatePermissionsRequest$AwsAccountId": "

    AWS account ID that contains the template you are describing.

    ", + "DescribeTemplateRequest$AwsAccountId": "

    AWS account ID that contains the template you are describing.

    ", "DescribeUserRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "GetDashboardEmbedUrlRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are embedding.

    ", + "IAMPolicyAssignment$AwsAccountId": "

    AWS account ID.

    ", + "ListDashboardVersionsRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are listing.

    ", + "ListDashboardsRequest$AwsAccountId": "

    AWS account ID that contains the dashboards you are listing.

    ", + "ListDataSetsRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "ListDataSourcesRequest$AwsAccountId": "

    The AWS account ID.

    ", "ListGroupMembershipsRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "ListGroupsRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "ListIAMPolicyAssignmentsForUserRequest$AwsAccountId": "

    The AWS account ID that contains the assignment.

    ", + "ListIAMPolicyAssignmentsRequest$AwsAccountId": "

    The AWS account ID that contains this IAM policy assignment.

    ", + "ListIngestionsRequest$AwsAccountId": "

    The AWS account ID.

    ", + "ListTemplateAliasesRequest$AwsAccountId": "

    AWS account ID that contains the template aliases you are listing.

    ", + "ListTemplateVersionsRequest$AwsAccountId": "

    AWS account ID that contains the templates you are listing.

    ", + "ListTemplatesRequest$AwsAccountId": "

    AWS account ID that contains the templates you are listing.

    ", "ListUserGroupsRequest$AwsAccountId": "

    The AWS Account ID that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "ListUsersRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", "RegisterUserRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "UpdateDashboardPermissionsRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are updating.

    ", + "UpdateDashboardPublishedVersionRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are updating.

    ", + "UpdateDashboardRequest$AwsAccountId": "

    AWS account ID that contains the dashboard you are updating.

    ", + "UpdateDataSetPermissionsRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "UpdateDataSetRequest$AwsAccountId": "

    The AWS Account ID.

    ", + "UpdateDataSourcePermissionsRequest$AwsAccountId": "

    The AWS account ID.

    ", + "UpdateDataSourceRequest$AwsAccountId": "

    The AWS account ID.

    ", "UpdateGroupRequest$AwsAccountId": "

    The ID for the AWS account that the group is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    ", + "UpdateIAMPolicyAssignmentRequest$AwsAccountId": "

    The AWS account ID that contains the IAM policy assignment.

    ", + "UpdateTemplateAliasRequest$AwsAccountId": "

    AWS account ID that contains the template aliases you are updating.

    ", + "UpdateTemplatePermissionsRequest$AwsAccountId": "

    AWS account ID that contains the template.

    ", + "UpdateTemplateRequest$AwsAccountId": "

    AWS account ID that contains the template you are updating.

    ", "UpdateUserRequest$AwsAccountId": "

    The ID for the AWS account that the user is in. Currently, you use the ID for the AWS account that contains your Amazon QuickSight account.

    " } }, + "AwsIotAnalyticsParameters": { + "base": "

    AWS IoT Analytics parameters.

    ", + "refs": { + "DataSourceParameters$AwsIotAnalyticsParameters": "

    AWS IoT Analytics parameters.

    " + } + }, "Boolean": { "base": null, "refs": { - "User$Active": "

    Active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an AD user, that user is inactive until they sign in and provide a password

    " + "SslProperties$DisableSsl": "

    A boolean flag to control whether SSL should be disabled.

    ", + "UploadSettings$ContainsHeader": "

    Whether or not the file(s) has a header row.

    ", + "User$Active": "

    Active status of user. When you create an Amazon QuickSight user that’s not an IAM user or an AD user, that user is inactive until they sign in and provide a password.

    " } }, - "CreateGroupMembershipRequest": { + "CalculatedColumn": { + "base": "

    A calculated column for a dataset.

    ", + "refs": { + "CalculatedColumnList$member": null + } + }, + "CalculatedColumnList": { "base": null, "refs": { + "CreateColumnsOperation$Columns": "

    Calculated columns to create.

    " } }, - "CreateGroupMembershipResponse": { + "CancelIngestionRequest": { "base": null, "refs": { } }, - "CreateGroupRequest": { - "base": "

    The request object for this operation.

    ", + "CancelIngestionResponse": { + "base": null, "refs": { } }, - "CreateGroupResponse": { - "base": "

    The response object for this operation.

    ", + "CastColumnTypeOperation": { + "base": "

    A transform operation that casts a column to a different type.

    ", "refs": { + "TransformOperation$CastColumnTypeOperation": "

    A transform operation that casts a column to a different type.

    " } }, - "DeleteGroupMembershipRequest": { + "Catalog": { "base": null, "refs": { + "PrestoParameters$Catalog": "

    Catalog.

    " } }, - "DeleteGroupMembershipResponse": { + "ClusterId": { "base": null, "refs": { + "RedshiftParameters$ClusterId": "

    Cluster ID. This can be blank if the Host and Port are provided.

    " } }, - "DeleteGroupRequest": { + "ColumnDataType": { "base": null, "refs": { + "CastColumnTypeOperation$NewColumnType": "

    New column data type.

    ", + "OutputColumn$Type": "

    Type.

    " } }, - "DeleteGroupResponse": { + "ColumnGroup": { + "base": "

    Groupings of columns that work together in certain QuickSight features. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.

    ", + "refs": { + "ColumnGroupList$member": null + } + }, + "ColumnGroupColumnSchema": { + "base": "

    A structure describing the name, datatype, and geographic role of the columns.

    ", + "refs": { + "ColumnGroupColumnSchemaList$member": null + } + }, + "ColumnGroupColumnSchemaList": { "base": null, "refs": { + "ColumnGroupSchema$ColumnGroupColumnSchemaList": "

    A structure containing the list of column group column schemas.

    " } }, - "DeleteUserByPrincipalIdRequest": { - "base": "

    ", + "ColumnGroupList": { + "base": null, "refs": { + "CreateDataSetRequest$ColumnGroups": "

    Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.

    ", + "DataSet$ColumnGroups": "

    Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.

    ", + "UpdateDataSetRequest$ColumnGroups": "

    Groupings of columns that work together in certain QuickSight features. Currently only geospatial hierarchy is supported.

    " } }, - "DeleteUserByPrincipalIdResponse": { + "ColumnGroupName": { "base": null, "refs": { + "GeoSpatialColumnGroup$Name": "

    A display name for the hierarchy.

    " } }, - "DeleteUserRequest": { + "ColumnGroupSchema": { + "base": "

    The column group schema.

    ", + "refs": { + "ColumnGroupSchemaList$member": null + } + }, + "ColumnGroupSchemaList": { "base": null, "refs": { + "DataSetConfiguration$ColumnGroupSchemaList": "

    A structure containing the list of column group schemas.

    " } }, - "DeleteUserResponse": { + "ColumnId": { "base": null, "refs": { + "CalculatedColumn$ColumnId": "

    A unique ID to identify a calculated column. During dataset update, if the column ID of a calculated column matches that of an existing calculated column, QuickSight preserves the existing calculated column.

    " } }, - "DescribeGroupRequest": { + "ColumnList": { "base": null, "refs": { + "GeoSpatialColumnGroup$Columns": "

    Columns in this hierarchy.

    " } }, - "DescribeGroupResponse": { + "ColumnName": { "base": null, "refs": { + "CalculatedColumn$ColumnName": "

    Column name.

    ", + "CastColumnTypeOperation$ColumnName": "

    Column name.

    ", + "ColumnList$member": null, + "InputColumn$Name": "

    The name of this column in the underlying data source.

    ", + "OutputColumn$Name": "

    A display name for the column.

    ", + "RenameColumnOperation$ColumnName": "

    Name of the column to be renamed.

    ", + "RenameColumnOperation$NewColumnName": "

    New name for the column.

    ", + "TagColumnOperation$ColumnName": "

    The column that this operation acts on.

    " } }, - "DescribeUserRequest": { + "ColumnSchema": { + "base": "

    The column schema.

    ", + "refs": { + "ColumnSchemaList$member": null + } + }, + "ColumnSchemaList": { "base": null, "refs": { + "DataSetSchema$ColumnSchemaList": "

    A structure containing the list of column schemas.

    " } }, - "DescribeUserResponse": { + "ColumnTag": { + "base": "

    A tag for a column in a TagColumnOperation. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.

    ", + "refs": { + "ColumnTagList$member": null + } + }, + "ColumnTagList": { "base": null, "refs": { + "TagColumnOperation$Tags": "

    The dataset column tag, currently only used for geospatial type tagging.

    This is not related to the tags used by the AWS resource tagging feature.

    " } }, - "DomainNotWhitelistedException": { - "base": "

    The domain specified is not on the allowlist. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.

    ", + "ConcurrentUpdatingException": { + "base": "

    A resource is already in an \"actionable\" state that must complete before a new update can be applied.

    ", "refs": { } }, - "EmbeddingUrl": { + "ConflictException": { + "base": "

    Updating or deleting a resource can cause an inconsistent state.

    ", + "refs": { + } + }, + "CreateColumnsOperation": { + "base": "

    A transform operation that creates calculated columns. Columns created in one such operation form a lexical closure.

    ", + "refs": { + "TransformOperation$CreateColumnsOperation": "

    An operation that creates calculated columns. Columns created in one such operation form a lexical closure.

    " + } + }, + "CreateDashboardRequest": { "base": null, "refs": { - "GetDashboardEmbedUrlResponse$EmbedUrl": "

    URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code that enables a single-signon session.

    " } }, - "ExceptionResourceType": { + "CreateDashboardResponse": { "base": null, "refs": { - "LimitExceededException$ResourceType": "

    Limit exceeded.

    ", - "ResourceExistsException$ResourceType": "

    The AWS request ID for this request.

    ", - "ResourceNotFoundException$ResourceType": "

    The AWS request ID for this request.

    ", - "ResourceUnavailableException$ResourceType": "

    The resource type for this request.

    " } }, - "GetDashboardEmbedUrlRequest": { + "CreateDataSetRequest": { "base": null, "refs": { } }, - "GetDashboardEmbedUrlResponse": { + "CreateDataSetResponse": { "base": null, "refs": { } }, - "Group": { - "base": "

    A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. Currently, an Amazon QuickSight subscription can't contain more than 500 Amazon QuickSight groups.

    ", + "CreateDataSourceRequest": { + "base": null, "refs": { - "CreateGroupResponse$Group": "

    The name of the group.

    ", - "DescribeGroupResponse$Group": "

    The name of the group.

    ", - "GroupList$member": null, - "UpdateGroupResponse$Group": "

    The name of the group.

    " } }, - "GroupDescription": { + "CreateDataSourceResponse": { "base": null, "refs": { - "CreateGroupRequest$Description": "

    A description for the group that you want to create.

    ", - "Group$Description": "

    The group description.

    ", - "UpdateGroupRequest$Description": "

    The description for the group that you want to update.

    " } }, - "GroupList": { + "CreateGroupMembershipRequest": { "base": null, "refs": { - "ListGroupsResponse$GroupList": "

    The list of the groups.

    ", - "ListUserGroupsResponse$GroupList": "

    The list of groups the user is a member of.

    " } }, - "GroupMember": { - "base": "

    A member of an Amazon QuickSight group. Currently, group members must be users. Groups can't be members of another group.

    ", + "CreateGroupMembershipResponse": { + "base": null, "refs": { - "CreateGroupMembershipResponse$GroupMember": "

    The group member.

    ", - "GroupMemberList$member": null } }, - "GroupMemberList": { + "CreateGroupRequest": { + "base": "

    The request object for this operation.

    ", + "refs": { + } + }, + "CreateGroupResponse": { + "base": "

    The response object for this operation.

    ", + "refs": { + } + }, + "CreateIAMPolicyAssignmentRequest": { "base": null, "refs": { - "ListGroupMembershipsResponse$GroupMemberList": "

    The list of the members of the group.

    " } }, - "GroupMemberName": { + "CreateIAMPolicyAssignmentResponse": { "base": null, "refs": { - "CreateGroupMembershipRequest$MemberName": "

    The name of the user that you want to add to the group membership.

    ", - "DeleteGroupMembershipRequest$MemberName": "

    The name of the user that you want to delete from the group membership.

    ", - "GroupMember$MemberName": "

    The name of the group member (user).

    " } }, - "GroupName": { + "CreateIngestionRequest": { "base": null, "refs": { - "CreateGroupMembershipRequest$GroupName": "

    The name of the group that you want to add the user to.

    ", - "CreateGroupRequest$GroupName": "

    A name for the group that you want to create.

    ", - "DeleteGroupMembershipRequest$GroupName": "

    The name of the group that you want to delete the user from.

    ", - "DeleteGroupRequest$GroupName": "

    The name of the group that you want to delete.

    ", - "DescribeGroupRequest$GroupName": "

    The name of the group that you want to describe.

    ", - "Group$GroupName": "

    The name of the group.

    ", - "ListGroupMembershipsRequest$GroupName": "

    The name of the group that you want to see a membership list of.

    ", - "UpdateGroupRequest$GroupName": "

    The name of the group that you want to update.

    " } }, - "IdentityType": { + "CreateIngestionResponse": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$IdentityType": "

    The authentication method the user uses to sign in (IAM only).

    ", - "RegisterUserRequest$IdentityType": "

    Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:

    • IAM: A user whose identity maps to an existing IAM user or role.

    • QUICKSIGHT: A user whose identity is owned and managed internally by Amazon QuickSight.

    ", - "User$IdentityType": "

    The type of identity authentication used by the user.

    " } }, - "IdentityTypeNotSupportedException": { - "base": "

    The identity type specified is not supported. Supported identity types include IAM and QUICKSIGHT.

    ", + "CreateTemplateAliasRequest": { + "base": null, "refs": { } }, - "InternalFailureException": { - "base": "

    An internal failure occurred.

    ", + "CreateTemplateAliasResponse": { + "base": null, "refs": { } }, - "InvalidNextTokenException": { - "base": "

    The NextToken value isn't valid.

    ", + "CreateTemplateRequest": { + "base": null, "refs": { } }, - "InvalidParameterValueException": { - "base": "

    One or more parameters don't have a valid value.

    ", + "CreateTemplateResponse": { + "base": null, "refs": { } }, - "LimitExceededException": { - "base": "

    A limit is exceeded.

    ", + "CredentialPair": { + "base": "

    The combination of username and password that are used as credentials.

    ", "refs": { + "DataSourceCredentials$CredentialPair": "

    Credential pair.

    " } }, - "ListGroupMembershipsRequest": { + "CustomSql": { + "base": "

    A physical table type built from the results of the custom SQL query.

    ", + "refs": { + "PhysicalTable$CustomSql": "

    A physical table type built from the results of the custom SQL query.

    " + } + }, + "CustomSqlName": { "base": null, "refs": { + "CustomSql$Name": "

    A display name for the SQL query result.

    " } }, - "ListGroupMembershipsResponse": { + "Dashboard": { + "base": "

    Dashboard.

    ", + "refs": { + "DescribeDashboardResponse$Dashboard": "

    Information about the dashboard.

    " + } + }, + "DashboardBehavior": { "base": null, "refs": { + "AdHocFilteringOption$AvailabilityStatus": "

    Availability status.

    ", + "ExportToCSVOption$AvailabilityStatus": "

    Availability status.

    " } }, - "ListGroupsRequest": { + "DashboardError": { + "base": "

    Dashboard error.

    ", + "refs": { + "DashboardErrorList$member": null + } + }, + "DashboardErrorList": { "base": null, "refs": { + "DashboardVersion$Errors": "

    Errors.

    " } }, - "ListGroupsResponse": { + "DashboardErrorType": { "base": null, "refs": { + "DashboardError$Type": "

    Type.

    " } }, - "ListUserGroupsRequest": { + "DashboardName": { "base": null, "refs": { + "CreateDashboardRequest$Name": "

    The display name of the dashboard.

    ", + "Dashboard$Name": "

    A display name for the dashboard.

    ", + "DashboardSummary$Name": "

    A display name for the dashboard.

    ", + "UpdateDashboardRequest$Name": "

    The display name of the dashboard.

    " } }, - "ListUserGroupsResponse": { + "DashboardPublishOptions": { + "base": "

    Dashboard publish options.

    ", + "refs": { + "CreateDashboardRequest$DashboardPublishOptions": "

    Publishing options when creating a dashboard.

    • AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc filtering. Enabled by default.

    • AvailabilityStatus for ExportToCSVOption - This can be either ENABLED or DISABLED. The visual option to export data to CSV is disabled when this is set to DISABLED. Enabled by default.

    • VisibilityState for SheetControlsOption - This can be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed when this is set to COLLAPSED and expanded when set to EXPANDED. Collapsed by default.

    Shorthand Syntax:

    AdHocFilteringDisabled=boolean,ExportToCSVDisabled=boolean,SheetControlsCollapsed=boolean

    ", + "UpdateDashboardRequest$DashboardPublishOptions": "

    Publishing options when creating a dashboard.

    • AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc filtering. Enabled by default.

    • AvailabilityStatus for ExportToCSVOption - This can be either ENABLED or DISABLED. The visual option to export data to CSV is disabled when this is set to DISABLED. Enabled by default.

    • VisibilityState for SheetControlsOption - This can be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed when this is set to COLLAPSED and expanded when set to EXPANDED. Collapsed by default.

    " + } + }, + "DashboardSourceEntity": { + "base": "

    Dashboard source entity.

    ", + "refs": { + "CreateDashboardRequest$SourceEntity": "

    Source entity from which the dashboard is created. The source entity accepts the ARN of the source template or analysis and also references the replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.

    If you are creating a dashboard from a source entity in a different AWS account, use the ARN of the source template.

    ", + "UpdateDashboardRequest$SourceEntity": "

    The template or analysis from which the dashboard is created. The SourceTemplate entity accepts the ARN of the template and also references the replacement datasets for the placeholders set when creating the template. The replacement datasets need to follow the same schema as the datasets for which placeholders were created when creating the template.

    " + } + }, + "DashboardSourceTemplate": { + "base": "

    Dashboard source template.

    ", + "refs": { + "DashboardSourceEntity$SourceTemplate": "

    Source template.

    " + } + }, + "DashboardSummary": { + "base": "

    Dashboard summary.

    ", + "refs": { + "DashboardSummaryList$member": null + } + }, + "DashboardSummaryList": { "base": null, "refs": { + "ListDashboardsResponse$DashboardSummaryList": "

    A structure that contains all of the dashboards shared with the user. Provides basic information about the dashboards.

    " } }, - "ListUsersRequest": { + "DashboardUIState": { "base": null, "refs": { + "SheetControlsOption$VisibilityState": "

    Visibility state.

    " } }, - "ListUsersResponse": { + "DashboardVersion": { + "base": "

    Dashboard version.

    ", + "refs": { + "Dashboard$Version": "

    Version.

    " + } + }, + "DashboardVersionSummary": { + "base": "

    Dashboard version summary.

    ", + "refs": { + "DashboardVersionSummaryList$member": null + } + }, + "DashboardVersionSummaryList": { "base": null, "refs": { + "ListDashboardVersionsResponse$DashboardVersionSummaryList": "

    A structure that contains information about each version of the dashboard.

    " } }, - "MaxResults": { + "DataSet": { + "base": "

    Dataset.

    ", + "refs": { + "DescribeDataSetResponse$DataSet": "

    Information on the dataset.

    " + } + }, + "DataSetConfiguration": { + "base": "

    Dataset configuration.

    ", + "refs": { + "DataSetConfigurationList$member": null + } + }, + "DataSetConfigurationList": { "base": null, "refs": { - "ListGroupMembershipsRequest$MaxResults": "

    The maximum number of results to return from this request.

    ", - "ListGroupsRequest$MaxResults": "

    The maximum number of results to return.

    ", - "ListUserGroupsRequest$MaxResults": "

    The maximum number of results to return from this request.

    ", - "ListUsersRequest$MaxResults": "

    The maximum number of results to return from this request.

    " + "TemplateVersion$DataSetConfigurations": "

    Schema of the dataset identified by the placeholder. Any dashboard created from this template should be bound to new datasets that match the same schema described through this API.

    " } }, - "Namespace": { + "DataSetImportMode": { "base": null, "refs": { - "CreateGroupMembershipRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "CreateGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DeleteGroupMembershipRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DeleteGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DeleteUserByPrincipalIdRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DeleteUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DescribeGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "DescribeUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "ListGroupMembershipsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "ListGroupsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "ListUserGroupsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "ListUsersRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "RegisterUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "UpdateGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", - "UpdateUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    " + "CreateDataSetRequest$ImportMode": "

    Indicates whether or not you want to import the data into SPICE.

    ", + "DataSet$ImportMode": "

    Indicates whether or not you want to import the data into SPICE.

    ", + "DataSetSummary$ImportMode": "

    Indicates whether or not you want to import the data into SPICE.

    ", + "UpdateDataSetRequest$ImportMode": "

    Indicates whether or not you want to import the data into SPICE.

    " } }, - "PreconditionNotMetException": { - "base": "

    One or more preconditions aren't met.

    ", + "DataSetName": { + "base": null, "refs": { + "AwsIotAnalyticsParameters$DataSetName": "

    Dataset name.

    " } }, - "QuickSightUserNotFoundException": { - "base": "

    The user is not found. This error can happen in any operation that requires finding a user based on a provided user name, such as DeleteUser, DescribeUser, and so on.

    ", + "DataSetReference": { + "base": "

    Dataset reference.

    ", "refs": { + "DataSetReferenceList$member": null } }, - "RegisterUserRequest": { + "DataSetReferenceList": { "base": null, "refs": { + "DashboardSourceTemplate$DataSetReferences": "

    Dataset references.

    ", + "TemplateSourceAnalysis$DataSetReferences": "

    A structure containing information about the dataset references used as placeholders in the template.

    " } }, - "RegisterUserResponse": { + "DataSetSchema": { + "base": "

    Dataset schema.

    ", + "refs": { + "DataSetConfiguration$DataSetSchema": "

    Dataset schema.

    " + } + }, + "DataSetSummary": { + "base": "

    Dataset summary.

    ", + "refs": { + "DataSetSummaryList$member": null + } + }, + "DataSetSummaryList": { "base": null, "refs": { + "ListDataSetsResponse$DataSetSummaries": "

    The list of dataset summaries.

    " } }, - "ResourceExistsException": { - "base": "

    The resource specified doesn't exist.

    ", + "DataSource": { + "base": "

    The structure of a data source.

    ", "refs": { + "DataSourceList$member": null, + "DescribeDataSourceResponse$DataSource": "

    The information on the data source.

    " } }, - "ResourceNotFoundException": { - "base": "

    One or more resources can't be found.

    ", + "DataSourceCredentials": { + "base": "

    Data source credentials.

    ", "refs": { + "CreateDataSourceRequest$Credentials": "

    The credentials QuickSight uses to connect to your underlying source. Currently only username/password based credentials are supported.

    ", + "UpdateDataSourceRequest$Credentials": "

    The credentials QuickSight uses to connect to your underlying source. Currently only username/password based credentials are supported.

    " } }, - "ResourceUnavailableException": { - "base": "

    This resource is currently unavailable.

    ", + "DataSourceErrorInfo": { + "base": "

    Error information on data source creation or update.

    ", "refs": { + "DataSource$ErrorInfo": "

    Error information from the last update or the creation of the data source.

    " } }, - "RoleSessionName": { + "DataSourceErrorInfoType": { "base": null, "refs": { - "RegisterUserRequest$SessionName": "

    You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role in the AWS CLI Reference.

    " + "DataSourceErrorInfo$Type": "

    Error type.

    " } }, - "SessionLifetimeInMinutes": { + "DataSourceList": { "base": null, "refs": { - "GetDashboardEmbedUrlRequest$SessionLifetimeInMinutes": "

    How many minutes the session is valid. The session lifetime must be between 15 and 600 minutes.

    " + "ListDataSourcesResponse$DataSources": "

    A list of data sources.

    " } }, - "SessionLifetimeInMinutesInvalidException": { - "base": "

    The number of minutes specified for the lifetime of a session is not valid. The session lifetime must be from 15 to 600 minutes.

    ", + "DataSourceParameters": { + "base": "

    The parameters QuickSight uses to connect to your underlying source. This is a variant type structure. At most one of the attributes should be non-null for this structure to be valid.

    ", "refs": { + "CreateDataSourceRequest$DataSourceParameters": "

    The parameters QuickSight uses to connect to your underlying source.

    ", + "DataSource$DataSourceParameters": "

    The parameters QuickSight uses to connect to your underlying source. This is a variant type structure. At most one of the attributes should be non-null for this structure to be valid.

    ", + "UpdateDataSourceRequest$DataSourceParameters": "

    The parameters QuickSight uses to connect to your underlying source.

    " } }, - "StatusCode": { + "DataSourceType": { "base": null, "refs": { - "CreateGroupMembershipResponse$Status": "

    The http status of the request.

    ", - "CreateGroupResponse$Status": "

    The http status of the request.

    ", - "DeleteGroupMembershipResponse$Status": "

    The http status of the request.

    ", - "DeleteGroupResponse$Status": "

    The http status of the request.

    ", - "DeleteUserByPrincipalIdResponse$Status": "

    The http status of the request.

    ", - "DeleteUserResponse$Status": "

    The http status of the request.

    ", - "DescribeGroupResponse$Status": "

    The http status of the request.

    ", - "DescribeUserResponse$Status": "

    The http status of the request.

    ", - "GetDashboardEmbedUrlResponse$Status": "

    The http status of the request.

    ", - "ListGroupMembershipsResponse$Status": "

    The http status of the request.

    ", - "ListGroupsResponse$Status": "

    The http status of the request.

    ", - "ListUserGroupsResponse$Status": "

    The HTTP status of the request.

    ", - "ListUsersResponse$Status": "

    The http status of the request.

    ", - "RegisterUserResponse$Status": "

    The http status of the request.

    ", - "UpdateGroupResponse$Status": "

    The http status of the request.

    ", - "UpdateUserResponse$Status": "

    The http status of the request.

    " + "CreateDataSourceRequest$Type": "

    The type of the data source. Currently the supported types for this operation are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO, REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources to return a list of all data sources.

    ", + "DataSource$Type": "

    The type of the data source. This indicates which database engine the data source connects to.

    " + } + }, + "Database": { + "base": null, + "refs": { + "AuroraParameters$Database": "

    Database.

    ", + "AuroraPostgreSqlParameters$Database": "

    Database.

    ", + "MariaDbParameters$Database": "

    Database.

    ", + "MySqlParameters$Database": "

    Database.

    ", + "PostgreSqlParameters$Database": "

    Database.

    ", + "RdsParameters$Database": "

    Database.

    ", + "RedshiftParameters$Database": "

    Database.

    ", + "SnowflakeParameters$Database": "

    Database.

    ", + "SqlServerParameters$Database": "

    Database.

    ", + "TeradataParameters$Database": "

    Database.

    " + } + }, + "DateTimeParameter": { + "base": "

    Date time parameter.

    ", + "refs": { + "DateTimeParameterList$member": null + } + }, + "DateTimeParameterList": { + "base": null, + "refs": { + "Parameters$DateTimeParameters": "

    DateTime parameters.

    " + } + }, + "DecimalParameter": { + "base": "

    Decimal parameter.

    ", + "refs": { + "DecimalParameterList$member": null + } + }, + "DecimalParameterList": { + "base": null, + "refs": { + "Parameters$DecimalParameters": "

    Decimal parameters.

    " + } + }, + "DeleteDashboardRequest": { + "base": null, + "refs": { + } + }, + "DeleteDashboardResponse": { + "base": null, + "refs": { + } + }, + "DeleteDataSetRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSetResponse": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceRequest": { + "base": null, + "refs": { + } + }, + "DeleteDataSourceResponse": { + "base": null, + "refs": { + } + }, + "DeleteGroupMembershipRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupMembershipResponse": { + "base": null, + "refs": { + } + }, + "DeleteGroupRequest": { + "base": null, + "refs": { + } + }, + "DeleteGroupResponse": { + "base": null, + "refs": { + } + }, + "DeleteIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "DeleteIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "DeleteTemplateAliasRequest": { + "base": null, + "refs": { + } + }, + "DeleteTemplateAliasResponse": { + "base": null, + "refs": { + } + }, + "DeleteTemplateRequest": { + "base": null, + "refs": { + } + }, + "DeleteTemplateResponse": { + "base": null, + "refs": { + } + }, + "DeleteUserByPrincipalIdRequest": { + "base": "

    ", + "refs": { + } + }, + "DeleteUserByPrincipalIdResponse": { + "base": null, + "refs": { + } + }, + "DeleteUserRequest": { + "base": null, + "refs": { + } + }, + "DeleteUserResponse": { + "base": null, + "refs": { + } + }, + "Delimiter": { + "base": null, + "refs": { + "UploadSettings$Delimiter": "

    The delimiter between values in the file.

    " + } + }, + "DescribeDashboardPermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDashboardPermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDashboardRequest": { + "base": null, + "refs": { + } + }, + "DescribeDashboardResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSetPermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetPermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSetRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSetResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSourcePermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeDataSourceRequest": { + "base": null, + "refs": { + } + }, + "DescribeDataSourceResponse": { + "base": null, + "refs": { + } + }, + "DescribeGroupRequest": { + "base": null, + "refs": { + } + }, + "DescribeGroupResponse": { + "base": null, + "refs": { + } + }, + "DescribeIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "DescribeIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "DescribeIngestionRequest": { + "base": null, + "refs": { + } + }, + "DescribeIngestionResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplateAliasRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplateAliasResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplatePermissionsRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplatePermissionsResponse": { + "base": null, + "refs": { + } + }, + "DescribeTemplateRequest": { + "base": null, + "refs": { + } + }, + "DescribeTemplateResponse": { + "base": null, + "refs": { + } + }, + "DescribeUserRequest": { + "base": null, + "refs": { + } + }, + "DescribeUserResponse": { + "base": null, + "refs": { + } + }, + "Domain": { + "base": null, + "refs": { + "AmazonElasticsearchParameters$Domain": "

    The Amazon Elasticsearch domain.

    " + } + }, + "DomainNotWhitelistedException": { + "base": "

    The domain specified is not on the allowlist. All domains for embedded dashboards must be added to the approved list by an Amazon QuickSight admin.

    ", + "refs": { + } + }, + "Double": { + "base": null, + "refs": { + "DoubleList$member": null + } + }, + "DoubleList": { + "base": null, + "refs": { + "DecimalParameter$Values": "

    Values.

    " + } + }, + "EmbeddingUrl": { + "base": null, + "refs": { + "GetDashboardEmbedUrlResponse$EmbedUrl": "

    URL that you can put into your server-side webpage to embed your dashboard. This URL is valid for 5 minutes, and the resulting session is valid for 10 hours. The API provides the URL with an auth_code that enables a single sign-on session.

    " + } + }, + "ErrorInfo": { + "base": "

    Error information on a data set SPICE ingestion.

    ", + "refs": { + "Ingestion$ErrorInfo": "

    Error information for this ingestion.

    " + } + }, + "ExceptionResourceType": { + "base": null, + "refs": { + "LimitExceededException$ResourceType": "

    Limit exceeded.

    ", + "ResourceExistsException$ResourceType": "

    The resource type for this request.

    ", + "ResourceNotFoundException$ResourceType": "

    The resource type for this request.

    ", + "ResourceUnavailableException$ResourceType": "

    The resource type for this request.

    " + } + }, + "ExportToCSVOption": { + "base": "

    Export to CSV option.

    ", + "refs": { + "DashboardPublishOptions$ExportToCSVOption": "

    Export to CSV option.

    " + } + }, + "Expression": { + "base": null, + "refs": { + "CalculatedColumn$Expression": "

    An expression that defines the calculated column.

    ", + "FilterOperation$ConditionExpression": "

    An expression that must evaluate to a boolean value. Rows for which the expression is evaluated to true are kept in the dataset.

    " + } + }, + "FileFormat": { + "base": null, + "refs": { + "UploadSettings$Format": "

    File format.

    " + } + }, + "FilterOperation": { + "base": "

    A transform operation that filters rows based on some condition.

    ", + "refs": { + "TransformOperation$FilterOperation": "

    An operation that filters rows based on some condition.

    " + } + }, + "GeoSpatialColumnGroup": { + "base": "

    Geospatial column group that denotes a hierarchy.

    ", + "refs": { + "ColumnGroup$GeoSpatialColumnGroup": "

    Geospatial column group that denotes a hierarchy.

    " + } + }, + "GeoSpatialCountryCode": { + "base": null, + "refs": { + "GeoSpatialColumnGroup$CountryCode": "

    Country code.

    " + } + }, + "GeoSpatialDataRole": { + "base": null, + "refs": { + "ColumnTag$ColumnGeographicRole": "

    A geospatial role for a column.

    " + } + }, + "GetDashboardEmbedUrlRequest": { + "base": null, + "refs": { + } + }, + "GetDashboardEmbedUrlResponse": { + "base": null, + "refs": { + } + }, + "Group": { + "base": "

    A group in Amazon QuickSight consists of a set of users. You can use groups to make it easier to manage access and security. Currently, an Amazon QuickSight subscription can't contain more than 500 Amazon QuickSight groups.

    ", + "refs": { + "CreateGroupResponse$Group": "

    The name of the group.

    ", + "DescribeGroupResponse$Group": "

    The name of the group.

    ", + "GroupList$member": null, + "UpdateGroupResponse$Group": "

    The name of the group.

    " + } + }, + "GroupDescription": { + "base": null, + "refs": { + "CreateGroupRequest$Description": "

    A description for the group that you want to create.

    ", + "Group$Description": "

    The group description.

    ", + "UpdateGroupRequest$Description": "

    The description for the group that you want to update.

    " + } + }, + "GroupList": { + "base": null, + "refs": { + "ListGroupsResponse$GroupList": "

    The list of the groups.

    ", + "ListUserGroupsResponse$GroupList": "

    The list of groups the user is a member of.

    " + } + }, + "GroupMember": { + "base": "

    A member of an Amazon QuickSight group. Currently, group members must be users. Groups can't be members of another group.

    ", + "refs": { + "CreateGroupMembershipResponse$GroupMember": "

    The group member.

    ", + "GroupMemberList$member": null + } + }, + "GroupMemberList": { + "base": null, + "refs": { + "ListGroupMembershipsResponse$GroupMemberList": "

    The list of the members of the group.

    " + } + }, + "GroupMemberName": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$MemberName": "

    The name of the user that you want to add to the group membership.

    ", + "DeleteGroupMembershipRequest$MemberName": "

    The name of the user that you want to delete from the group membership.

    ", + "GroupMember$MemberName": "

    The name of the group member (user).

    " + } + }, + "GroupName": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$GroupName": "

    The name of the group that you want to add the user to.

    ", + "CreateGroupRequest$GroupName": "

    A name for the group that you want to create.

    ", + "DeleteGroupMembershipRequest$GroupName": "

    The name of the group that you want to delete the user from.

    ", + "DeleteGroupRequest$GroupName": "

    The name of the group that you want to delete.

    ", + "DescribeGroupRequest$GroupName": "

    The name of the group that you want to describe.

    ", + "Group$GroupName": "

    The name of the group.

    ", + "ListGroupMembershipsRequest$GroupName": "

    The name of the group that you want to see a membership list of.

    ", + "UpdateGroupRequest$GroupName": "

    The name of the group that you want to update.

    " + } + }, + "Host": { + "base": null, + "refs": { + "AuroraParameters$Host": "

    Host.

    ", + "AuroraPostgreSqlParameters$Host": "

    Host.

    ", + "MariaDbParameters$Host": "

    Host.

    ", + "MySqlParameters$Host": "

    Host.

    ", + "PostgreSqlParameters$Host": "

    Host.

    ", + "PrestoParameters$Host": "

    Host.

    ", + "RedshiftParameters$Host": "

    Host. This can be blank if the ClusterId is provided.

    ", + "SnowflakeParameters$Host": "

    Host.

    ", + "SparkParameters$Host": "

    Host.

    ", + "SqlServerParameters$Host": "

    Host.

    ", + "TeradataParameters$Host": "

    Host.

    " + } + }, + "IAMPolicyAssignment": { + "base": "

    IAM policy assignment.

    ", + "refs": { + "DescribeIAMPolicyAssignmentResponse$IAMPolicyAssignment": "

    Information describing the IAM policy assignment.

    " + } + }, + "IAMPolicyAssignmentName": { + "base": null, + "refs": { + "ActiveIAMPolicyAssignment$AssignmentName": "

    A name for the IAM policy assignment.

    ", + "CreateIAMPolicyAssignmentRequest$AssignmentName": "

    The name of the assignment. It must be unique within an AWS account.

    ", + "CreateIAMPolicyAssignmentResponse$AssignmentName": "

    The name of the assignment. It must be unique within an AWS account.

    ", + "DeleteIAMPolicyAssignmentRequest$AssignmentName": "

    The name of the assignment.

    ", + "DeleteIAMPolicyAssignmentResponse$AssignmentName": "

    The name of the assignment.

    ", + "DescribeIAMPolicyAssignmentRequest$AssignmentName": "

    The name of the assignment.

    ", + "IAMPolicyAssignment$AssignmentName": "

    Assignment name.

    ", + "IAMPolicyAssignmentSummary$AssignmentName": "

    Assignment name.

    ", + "UpdateIAMPolicyAssignmentRequest$AssignmentName": "

    The name of the assignment. It must be unique within an AWS account.

    ", + "UpdateIAMPolicyAssignmentResponse$AssignmentName": "

    The name of the assignment.

    " + } + }, + "IAMPolicyAssignmentSummary": { + "base": "

    IAM policy assignment Summary.

    ", + "refs": { + "IAMPolicyAssignmentSummaryList$member": null + } + }, + "IAMPolicyAssignmentSummaryList": { + "base": null, + "refs": { + "ListIAMPolicyAssignmentsResponse$IAMPolicyAssignments": "

    Information describing the IAM policy assignments.

    " + } + }, + "IdentityMap": { + "base": null, + "refs": { + "CreateIAMPolicyAssignmentRequest$Identities": "

    QuickSight users and/or groups that you want to assign the policy to.

    ", + "CreateIAMPolicyAssignmentResponse$Identities": "

    QuickSight users and/or groups that are assigned to the IAM policy.

    ", + "IAMPolicyAssignment$Identities": "

    Identities.

    ", + "UpdateIAMPolicyAssignmentRequest$Identities": "

    QuickSight users and/or groups that you want to assign to the specified IAM policy.

    ", + "UpdateIAMPolicyAssignmentResponse$Identities": "

    QuickSight users and/or groups that are assigned to this IAM policy.

    " + } + }, + "IdentityName": { + "base": null, + "refs": { + "IdentityNameList$member": null + } + }, + "IdentityNameList": { + "base": null, + "refs": { + "IdentityMap$value": null + } + }, + "IdentityType": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$IdentityType": "

    The authentication method the user uses to sign in (IAM only).

    ", + "RegisterUserRequest$IdentityType": "

    Amazon QuickSight supports several ways of managing the identity of users. This parameter accepts two values:

    • IAM: A user whose identity maps to an existing IAM user or role.

    • QUICKSIGHT: A user whose identity is owned and managed internally by Amazon QuickSight.

    ", + "User$IdentityType": "

    The type of identity authentication used by the user.

    " + } + }, + "IdentityTypeNotSupportedException": { + "base": "

    The identity type specified is not supported. Supported identity types include IAM and QUICKSIGHT.

    ", + "refs": { + } + }, + "Ingestion": { + "base": "

    Information on the SPICE ingestion for a dataset.

    ", + "refs": { + "DescribeIngestionResponse$Ingestion": "

    Information about the ingestion.

    ", + "Ingestions$member": null + } + }, + "IngestionErrorType": { + "base": null, + "refs": { + "ErrorInfo$Type": "

    Error type.

    " + } + }, + "IngestionId": { + "base": null, + "refs": { + "CancelIngestionRequest$IngestionId": "

    An ID for the ingestion.

    ", + "CancelIngestionResponse$IngestionId": "

    An ID for the ingestion.

    ", + "CreateIngestionRequest$IngestionId": "

    An ID for the ingestion.

    ", + "CreateIngestionResponse$IngestionId": "

    An ID for the ingestion.

    ", + "DescribeIngestionRequest$IngestionId": "

    An ID for the ingestion.

    ", + "Ingestion$IngestionId": "

    Ingestion ID.

    " + } + }, + "IngestionMaxResults": { + "base": null, + "refs": { + "ListIngestionsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    " + } + }, + "IngestionRequestSource": { + "base": null, + "refs": { + "Ingestion$RequestSource": "

    Event source for this ingestion.

    " + } + }, + "IngestionRequestType": { + "base": null, + "refs": { + "Ingestion$RequestType": "

    Type of this ingestion.

    " + } + }, + "IngestionStatus": { + "base": null, + "refs": { + "CreateIngestionResponse$IngestionStatus": "

    The ingestion status.

    ", + "Ingestion$IngestionStatus": "

    Ingestion status.

    " + } + }, + "Ingestions": { + "base": null, + "refs": { + "ListIngestionsResponse$Ingestions": "

    A list of the ingestions.

    " + } + }, + "InputColumn": { + "base": "

    Metadata on a column that is used as the input of a transform operation.

    ", + "refs": { + "InputColumnList$member": null + } + }, + "InputColumnDataType": { + "base": null, + "refs": { + "InputColumn$Type": "

    The data type of the column.

    " + } + }, + "InputColumnList": { + "base": null, + "refs": { + "CustomSql$Columns": "

    The column schema from the SQL query result set.

    ", + "RelationalTable$InputColumns": "

    The column schema of the table.

    ", + "S3Source$InputColumns": "

    The column schema of the S3 data source.

    " + } + }, + "InstanceId": { + "base": null, + "refs": { + "RdsParameters$InstanceId": "

    Instance ID.

    " + } + }, + "IntegerParameter": { + "base": "

    Integer parameter.

    ", + "refs": { + "IntegerParameterList$member": null + } + }, + "IntegerParameterList": { + "base": null, + "refs": { + "Parameters$IntegerParameters": "

    Integer parameters.

    " + } + }, + "InternalFailureException": { + "base": "

    An internal failure occurred.

    ", + "refs": { + } + }, + "InvalidNextTokenException": { + "base": "

    The NextToken value isn't valid.

    ", + "refs": { + } + }, + "InvalidParameterValueException": { + "base": "

    One or more parameters don't have a valid value.

    ", + "refs": { + } + }, + "JiraParameters": { + "base": "

    Jira parameters.

    ", + "refs": { + "DataSourceParameters$JiraParameters": "

    Jira parameters.

    " + } + }, + "JoinInstruction": { + "base": "

    Join instruction.

    ", + "refs": { + "LogicalTableSource$JoinInstruction": "

    Specifies the result of a join of two logical tables.

    " + } + }, + "JoinType": { + "base": null, + "refs": { + "JoinInstruction$Type": "

    Type.

    " + } + }, + "LimitExceededException": { + "base": "

    A limit is exceeded.

    ", + "refs": { + } + }, + "ListDashboardVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListDashboardVersionsResponse": { + "base": null, + "refs": { + } + }, + "ListDashboardsRequest": { + "base": null, + "refs": { + } + }, + "ListDashboardsResponse": { + "base": null, + "refs": { + } + }, + "ListDataSetsRequest": { + "base": null, + "refs": { + } + }, + "ListDataSetsResponse": { + "base": null, + "refs": { + } + }, + "ListDataSourcesRequest": { + "base": null, + "refs": { + } + }, + "ListDataSourcesResponse": { + "base": null, + "refs": { + } + }, + "ListGroupMembershipsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupMembershipsResponse": { + "base": null, + "refs": { + } + }, + "ListGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListGroupsResponse": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsForUserRequest": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsForUserResponse": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsRequest": { + "base": null, + "refs": { + } + }, + "ListIAMPolicyAssignmentsResponse": { + "base": null, + "refs": { + } + }, + "ListIngestionsRequest": { + "base": null, + "refs": { + } + }, + "ListIngestionsResponse": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "ListTemplateAliasesRequest": { + "base": null, + "refs": { + } + }, + "ListTemplateAliasesResponse": { + "base": null, + "refs": { + } + }, + "ListTemplateVersionsRequest": { + "base": null, + "refs": { + } + }, + "ListTemplateVersionsResponse": { + "base": null, + "refs": { + } + }, + "ListTemplatesRequest": { + "base": null, + "refs": { + } + }, + "ListTemplatesResponse": { + "base": null, + "refs": { + } + }, + "ListUserGroupsRequest": { + "base": null, + "refs": { + } + }, + "ListUserGroupsResponse": { + "base": null, + "refs": { + } + }, + "ListUsersRequest": { + "base": null, + "refs": { + } + }, + "ListUsersResponse": { + "base": null, + "refs": { + } + }, + "LogicalTable": { + "base": "

    A unit that joins and data transformations operate on. A logical table has a source, which can be either a physical table or result of a join. When it points to a physical table, a logical table acts as a mutable copy of that table through transform operations.

    ", + "refs": { + "LogicalTableMap$value": null + } + }, + "LogicalTableAlias": { + "base": null, + "refs": { + "LogicalTable$Alias": "

    A display name for the logical table.

    " + } + }, + "LogicalTableId": { + "base": null, + "refs": { + "JoinInstruction$LeftOperand": "

    Left operand.

    ", + "JoinInstruction$RightOperand": "

    Right operand.

    ", + "LogicalTableMap$key": null + } + }, + "LogicalTableMap": { + "base": null, + "refs": { + "CreateDataSetRequest$LogicalTableMap": "

    Configures the combination and transformation of the data from the physical tables.

    ", + "DataSet$LogicalTableMap": "

    Configures the combination and transformation of the data from the physical tables.

    ", + "UpdateDataSetRequest$LogicalTableMap": "

    Configures the combination and transformation of the data from the physical tables.

    " + } + }, + "LogicalTableSource": { + "base": "

    Information on the source of a logical table. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.

    ", + "refs": { + "LogicalTable$Source": "

    Source of this logical table.

    " + } + }, + "Long": { + "base": null, + "refs": { + "DataSet$ConsumedSpiceCapacityInBytes": "

    The amount of SPICE capacity used by this dataset. This is 0 if the dataset isn't imported into SPICE.

    ", + "LongList$member": null + } + }, + "LongList": { + "base": null, + "refs": { + "IntegerParameter$Values": "

    Values.

    " + } + }, + "ManifestFileLocation": { + "base": "

    Amazon S3 manifest file location.

    ", + "refs": { + "S3Parameters$ManifestFileLocation": "

    Location of the Amazon S3 manifest file. This is NULL if the manifest file was uploaded in the console.

    " + } + }, + "MariaDbParameters": { + "base": "

    MariaDB parameters.

    ", + "refs": { + "DataSourceParameters$MariaDbParameters": "

    MariaDB parameters.

    " + } + }, + "MaxResults": { + "base": null, + "refs": { + "ListDashboardVersionsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListDashboardsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListDataSetsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListDataSourcesRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListGroupMembershipsRequest$MaxResults": "

    The maximum number of results to return from this request.

    ", + "ListGroupsRequest$MaxResults": "

    The maximum number of results to return.

    ", + "ListIAMPolicyAssignmentsForUserRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListIAMPolicyAssignmentsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListTemplateAliasesRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListTemplateVersionsRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListTemplatesRequest$MaxResults": "

    The maximum number of results to be returned per request.

    ", + "ListUserGroupsRequest$MaxResults": "

    The maximum number of results to return from this request.

    ", + "ListUsersRequest$MaxResults": "

    The maximum number of results to return from this request.

    " + } + }, + "MySqlParameters": { + "base": "

    MySQL parameters.

    ", + "refs": { + "DataSourceParameters$MySqlParameters": "

    MySQL parameters.

    " + } + }, + "Namespace": { + "base": null, + "refs": { + "CreateGroupMembershipRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "CreateGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "CreateIAMPolicyAssignmentRequest$Namespace": "

    The namespace that contains the assignment.

    ", + "DeleteGroupMembershipRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "DeleteGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "DeleteIAMPolicyAssignmentRequest$Namespace": "

    The namespace that contains the assignment.

    ", + "DeleteUserByPrincipalIdRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "DeleteUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "DescribeGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "DescribeIAMPolicyAssignmentRequest$Namespace": "

    The namespace that contains the assignment.

    ", + "DescribeUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "ListGroupMembershipsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "ListGroupsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "ListIAMPolicyAssignmentsForUserRequest$Namespace": "

    The namespace of the assignment.

    ", + "ListIAMPolicyAssignmentsRequest$Namespace": "

    The namespace for this assignment.

    ", + "ListUserGroupsRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "ListUsersRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "RegisterUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "UpdateGroupRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    ", + "UpdateIAMPolicyAssignmentRequest$Namespace": "

    The namespace of the assignment.

    ", + "UpdateUserRequest$Namespace": "

    The namespace. Currently, you should set this to default.

    " + } + }, + "NonEmptyString": { + "base": null, + "refs": { + "DashboardError$Message": "

    Message.

    ", + "DataSetReference$DataSetPlaceholder": "

    Dataset placeholder.

    ", + "DateTimeParameter$Name": "

    A display name for the date-time parameter.

    ", + "DecimalParameter$Name": "

    A display name for the decimal parameter.

    ", + "IntegerParameter$Name": "

    A display name for the integer parameter.

    ", + "StringParameter$Name": "

    A display name for the string parameter.

    ", + "TemplateError$Message": "

    Description of the error type.

    " + } + }, + "OnClause": { + "base": null, + "refs": { + "JoinInstruction$OnClause": "

    On Clause.

    " + } + }, + "OptionalPort": { + "base": null, + "refs": { + "RedshiftParameters$Port": "

    Port. This can be blank if the ClusterId is provided.

    " + } + }, + "OutputColumn": { + "base": "

    Output column.

    ", + "refs": { + "OutputColumnList$member": null + } + }, + "OutputColumnList": { + "base": null, + "refs": { + "DataSet$OutputColumns": "

    The list of columns after all transforms. These columns are available in templates, analyses, and dashboards.

    " + } + }, + "Parameters": { + "base": "

    Parameters.

    ", + "refs": { + "CreateDashboardRequest$Parameters": "

    A structure that contains the parameters of the dashboard. These are parameter overrides for a dashboard. A dashboard can have any type of parameters and some parameters might accept multiple values. You could use the following structure to override two string parameters that accept multiple values:

    ", + "UpdateDashboardRequest$Parameters": "

    A structure that contains the parameters of the dashboard.

    " + } + }, + "Password": { + "base": null, + "refs": { + "CredentialPair$Password": "

    Password.

    " + } + }, + "PhysicalTable": { + "base": "

    A view of a data source. Contains information on the shape of the data in the underlying source. This is a variant type structure. No more than one of the attributes can be non-null for this structure to be valid.

    ", + "refs": { + "PhysicalTableMap$value": null + } + }, + "PhysicalTableId": { + "base": null, + "refs": { + "LogicalTableSource$PhysicalTableId": "

    Physical table ID.

    ", + "PhysicalTableMap$key": null + } + }, + "PhysicalTableMap": { + "base": null, + "refs": { + "CreateDataSetRequest$PhysicalTableMap": "

    Declares the physical tables that are available in the underlying data sources.

    ", + "DataSet$PhysicalTableMap": "

    Declares the physical tables that are available in the underlying data sources.

    ", + "UpdateDataSetRequest$PhysicalTableMap": "

    Declares the physical tables that are available in the underlying data sources.

    " + } + }, + "Port": { + "base": null, + "refs": { + "AuroraParameters$Port": "

    Port.

    ", + "AuroraPostgreSqlParameters$Port": "

    Port.

    ", + "MariaDbParameters$Port": "

    Port.

    ", + "MySqlParameters$Port": "

    Port.

    ", + "PostgreSqlParameters$Port": "

    Port.

    ", + "PrestoParameters$Port": "

    Port.

    ", + "SparkParameters$Port": "

    Port.

    ", + "SqlServerParameters$Port": "

    Port.

    ", + "TeradataParameters$Port": "

    Port.

    " + } + }, + "PositiveInteger": { + "base": null, + "refs": { + "TwitterParameters$MaxRows": "

    Maximum number of rows to query Twitter.

    ", + "UploadSettings$StartFromRow": "

    A row number to start reading data from.

    " + } + }, + "PostgreSqlParameters": { + "base": "

    PostgreSQL parameters.

    ", + "refs": { + "DataSourceParameters$PostgreSqlParameters": "

    PostgreSQL parameters.

    " + } + }, + "PreconditionNotMetException": { + "base": "

    One or more preconditions aren't met.

    ", + "refs": { + } + }, + "PrestoParameters": { + "base": "

    Presto parameters.

    ", + "refs": { + "DataSourceParameters$PrestoParameters": "

    Presto parameters.

    " + } + }, + "Principal": { + "base": null, + "refs": { + "ResourcePermission$Principal": "

    The ARN of a QuickSight user or group, or an IAM ARN. If you are using cross-account resource sharing, this is the IAM ARN of an account root. Otherwise, it is the ARN of a QuickSight user or group.

    " + } + }, + "ProjectOperation": { + "base": "

    A transform operation that projects columns. Operations that come after a projection can only refer to projected columns.

    ", + "refs": { + "TransformOperation$ProjectOperation": "

    An operation that projects columns. Operations that come after a projection can only refer to projected columns.

    " + } + }, + "ProjectedColumnList": { + "base": null, + "refs": { + "ProjectOperation$ProjectedColumns": "

    Projected columns.

    " + } + }, + "Query": { + "base": null, + "refs": { + "TwitterParameters$Query": "

    Twitter query string.

    " + } + }, + "QueueInfo": { + "base": "

    Information on queued dataset SPICE ingestion.

    ", + "refs": { + "Ingestion$QueueInfo": null + } + }, + "QuickSightUserNotFoundException": { + "base": "

    The user is not found. This error can happen in any operation that requires finding a user based on a provided user name, such as DeleteUser, DescribeUser, and so on.

    ", + "refs": { + } + }, + "RdsParameters": { + "base": "

    RDS parameters.

    ", + "refs": { + "DataSourceParameters$RdsParameters": "

    RDS parameters.

    " + } + }, + "RedshiftParameters": { + "base": "

    Redshift parameters. The ClusterId field can be blank if Host and Port are both set, and vice versa.

    ", + "refs": { + "DataSourceParameters$RedshiftParameters": "

    Redshift parameters.

    " + } + }, + "RegisterUserRequest": { + "base": null, + "refs": { + } + }, + "RegisterUserResponse": { + "base": null, + "refs": { + } + }, + "RelationalTable": { + "base": "

    A physical table type for relational data sources.

    ", + "refs": { + "PhysicalTable$RelationalTable": "

    A physical table type for relational data sources.

    " + } + }, + "RelationalTableName": { + "base": null, + "refs": { + "RelationalTable$Name": "

    Name of the relational table.

    " + } + }, + "RelationalTableSchema": { + "base": null, + "refs": { + "RelationalTable$Schema": "

    The schema name. Applies to certain relational database engines.

    " + } + }, + "RenameColumnOperation": { + "base": "

    A transform operation that renames a column.

    ", + "refs": { + "TransformOperation$RenameColumnOperation": "

    An operation that renames a column.

    " + } + }, + "ResourceExistsException": { + "base": "

    The resource specified already exists.

    ", + "refs": { + } + }, + "ResourceId": { + "base": null, + "refs": { + "CreateDataSetRequest$DataSetId": "

    An ID for the dataset you want to create. This is unique per region per AWS account.

    ", + "CreateDataSetResponse$DataSetId": "

    The ID for the dataset you want to create. This is unique per region per AWS account.

    ", + "CreateDataSetResponse$IngestionId": "

    The ID of the ingestion, which is triggered as a result of dataset creation if the import mode is SPICE.

    ", + "CreateDataSourceRequest$DataSourceId": "

    An ID for the data source. This is unique per AWS Region per AWS account.

    ", + "CreateDataSourceResponse$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DataSet$DataSetId": "

    The ID of the dataset.

    ", + "DataSetSummary$DataSetId": "

    The ID of the dataset.

    ", + "DataSource$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DeleteDataSetRequest$DataSetId": "

    The ID for the dataset that you want to delete. This is unique per region per AWS account.

    ", + "DeleteDataSetResponse$DataSetId": "

    The ID for the dataset that you deleted. This is unique per region per AWS account.

    ", + "DeleteDataSourceRequest$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DeleteDataSourceResponse$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DescribeDataSetPermissionsRequest$DataSetId": "

    The ID for the dataset whose permissions you want to describe. This is unique per region per AWS account.

    ", + "DescribeDataSetPermissionsResponse$DataSetId": "

    The ID for the dataset whose permissions were described. This is unique per region per AWS account.

    ", + "DescribeDataSetRequest$DataSetId": "

    The ID for the dataset that you want to describe. This is unique per region per AWS account.

    ", + "DescribeDataSourcePermissionsRequest$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DescribeDataSourcePermissionsResponse$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "DescribeDataSourceRequest$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "UpdateDataSetPermissionsRequest$DataSetId": "

    The ID for the dataset whose permissions you want to update. This is unique per region per AWS account.

    ", + "UpdateDataSetPermissionsResponse$DataSetId": "

    The ID for the dataset whose permissions were updated. This is unique per region per AWS account.

    ", + "UpdateDataSetRequest$DataSetId": "

    The ID for the dataset that you want to update. This is unique per region per AWS account.

    ", + "UpdateDataSetResponse$DataSetId": "

    The ID for the dataset that you updated. This is unique per region per AWS account.

    ", + "UpdateDataSetResponse$IngestionId": "

    The ID of the ingestion, which is triggered as a result of the dataset update if the import mode is SPICE.

    ", + "UpdateDataSourcePermissionsRequest$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "UpdateDataSourcePermissionsResponse$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "UpdateDataSourceRequest$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    ", + "UpdateDataSourceResponse$DataSourceId": "

    The ID of the data source. This is unique per AWS Region per AWS account.

    " + } + }, + "ResourceName": { + "base": null, + "refs": { + "CreateDataSetRequest$Name": "

    The display name for the dataset.

    ", + "CreateDataSourceRequest$Name": "

    A display name for the data source.

    ", + "DataSet$Name": "

    A display name for the dataset.

    ", + "DataSetSummary$Name": "

    A display name for the dataset.

    ", + "DataSource$Name": "

    A display name for the data source.

    ", + "UpdateDataSetRequest$Name": "

    The display name for the dataset.

    ", + "UpdateDataSourceRequest$Name": "

    A display name for the data source.

    " + } + }, + "ResourceNotFoundException": { + "base": "

    One or more resources can't be found.

    ", + "refs": { + } + }, + "ResourcePermission": { + "base": "

    Permission for the resource.

    ", + "refs": { + "ResourcePermissionList$member": null, + "UpdateResourcePermissionList$member": null + } + }, + "ResourcePermissionList": { + "base": null, + "refs": { + "CreateDashboardRequest$Permissions": "

    A structure that contains the permissions of the dashboard. You can use this for granting permissions with principal and action information.

    ", + "CreateDataSetRequest$Permissions": "

    A list of resource permissions on the dataset.

    ", + "CreateDataSourceRequest$Permissions": "

    A list of resource permissions on the data source.

    ", + "CreateTemplateRequest$Permissions": "

    A list of resource permissions to be set on the template. The shorthand syntax should look similar to this: Principal=string,Actions=string,string ...

    ", + "DescribeDashboardPermissionsResponse$Permissions": "

    A structure that contains the permissions of the dashboard.

    ", + "DescribeDataSetPermissionsResponse$Permissions": "

    A list of resource permissions on the dataset.

    ", + "DescribeDataSourcePermissionsResponse$Permissions": "

    A list of resource permissions on the data source.

    ", + "DescribeTemplatePermissionsResponse$Permissions": "

    A list of resource permissions to be set on the template.

    ", + "UpdateDashboardPermissionsResponse$Permissions": "

    Information about the permissions on the dashboard.

    ", + "UpdateDataSetPermissionsRequest$GrantPermissions": "

    The resource permissions that you want to grant to the dataset.

    ", + "UpdateDataSetPermissionsRequest$RevokePermissions": "

    The resource permissions that you want to revoke from the dataset.

    ", + "UpdateDataSourcePermissionsRequest$GrantPermissions": "

    A list of resource permissions that you want to grant on the data source.

    ", + "UpdateDataSourcePermissionsRequest$RevokePermissions": "

    A list of resource permissions that you want to revoke on the data source.

    ", + "UpdateTemplatePermissionsResponse$Permissions": "

    A list of resource permissions to be set on the template.

    " + } + }, + "ResourceStatus": { + "base": null, + "refs": { + "CreateDashboardResponse$CreationStatus": "

    The creation status of the dashboard create request.

    ", + "CreateDataSourceResponse$CreationStatus": "

    The status of creating the data source.

    ", + "CreateTemplateResponse$CreationStatus": "

    The template creation status.

    ", + "DashboardVersion$Status": "

    The status of the dashboard version.

    ", + "DashboardVersionSummary$Status": "

    The status of the dashboard version.

    ", + "DataSource$Status": "

    The status of the data source.

    ", + "TemplateVersion$Status": "

    The status of the template version.

    ", + "TemplateVersionSummary$Status": "

    The status of the template version.

    ", + "UpdateDashboardResponse$CreationStatus": "

    The creation status of the request.

    ", + "UpdateDataSourceResponse$UpdateStatus": "

    The status of the data source's last update.

    ", + "UpdateTemplateResponse$CreationStatus": "

    The creation status of the template.

    " + } + }, + "ResourceUnavailableException": { + "base": "

    This resource is currently unavailable.

    ", + "refs": { + } + }, + "RestrictiveResourceId": { + "base": null, + "refs": { + "CreateDashboardRequest$DashboardId": "

    The ID for the dashboard, also added to IAM policy.

    ", + "CreateDashboardResponse$DashboardId": "

    The ID for the dashboard.

    ", + "CreateTemplateAliasRequest$TemplateId": "

    An ID for the template.

    ", + "CreateTemplateRequest$TemplateId": "

    An ID for the template you want to create. This is unique per AWS region per AWS account.

    ", + "CreateTemplateResponse$TemplateId": "

    The ID of the template.

    ", + "Dashboard$DashboardId": "

    Dashboard ID.

    ", + "DashboardSummary$DashboardId": "

    Dashboard ID.

    ", + "DeleteDashboardRequest$DashboardId": "

    The ID for the dashboard.

    ", + "DeleteDashboardResponse$DashboardId": "

    The ID of the dashboard.

    ", + "DeleteTemplateAliasRequest$TemplateId": "

    An ID for the template.

    ", + "DeleteTemplateAliasResponse$TemplateId": "

    An ID for the template.

    ", + "DeleteTemplateRequest$TemplateId": "

    An ID for the template you want to delete.

    ", + "DeleteTemplateResponse$TemplateId": "

    An ID for the template.

    ", + "DescribeDashboardPermissionsRequest$DashboardId": "

    The ID for the dashboard, also added to IAM policy.

    ", + "DescribeDashboardPermissionsResponse$DashboardId": "

    The ID for the dashboard.

    ", + "DescribeDashboardRequest$DashboardId": "

    The ID for the dashboard.

    ", + "DescribeTemplateAliasRequest$TemplateId": "

    An ID for the template.

    ", + "DescribeTemplatePermissionsRequest$TemplateId": "

    The ID for the template.

    ", + "DescribeTemplatePermissionsResponse$TemplateId": "

    The ID for the template.

    ", + "DescribeTemplateRequest$TemplateId": "

    An ID for the template.

    ", + "GetDashboardEmbedUrlRequest$DashboardId": "

    The ID for the dashboard, also added to IAM policy.

    ", + "ListDashboardVersionsRequest$DashboardId": "

    The ID for the dashboard.

    ", + "ListTemplateAliasesRequest$TemplateId": "

    The ID for the template.

    ", + "ListTemplateVersionsRequest$TemplateId": "

    The ID for the template.

    ", + "Template$TemplateId": "

    The ID for the template. This is unique per region per AWS account.

    ", + "TemplateSummary$TemplateId": "

    The ID of the template. This is unique per region per AWS account.

    ", + "UpdateDashboardPermissionsRequest$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateDashboardPermissionsResponse$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateDashboardPublishedVersionRequest$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateDashboardPublishedVersionResponse$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateDashboardRequest$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateDashboardResponse$DashboardId": "

    The ID for the dashboard.

    ", + "UpdateTemplateAliasRequest$TemplateId": "

    The ID for the template.

    ", + "UpdateTemplatePermissionsRequest$TemplateId": "

    The ID for the template.

    ", + "UpdateTemplatePermissionsResponse$TemplateId": "

    The ID for the template.

    ", + "UpdateTemplateRequest$TemplateId": "

    The ID for the template.

    ", + "UpdateTemplateResponse$TemplateId": "

    The ID for the template.

    " + } + }, + "RoleSessionName": { + "base": null, + "refs": { + "RegisterUserRequest$SessionName": "

    You need to use this parameter only when you register one or more users using an assumed IAM role. You don't need to provide the session name for other scenarios, for example when you are registering an IAM user or an Amazon QuickSight user. You can register multiple users using the same IAM role if each user has a different session name. For more information on assuming IAM roles, see assume-role in the AWS CLI Reference.

    " + } + }, + "RowInfo": { + "base": "

    Information on rows during a data set SPICE ingestion.

    ", + "refs": { + "Ingestion$RowInfo": null + } + }, + "RowLevelPermissionDataSet": { + "base": "

    Row-level security configuration on the dataset.

    ", + "refs": { + "CreateDataSetRequest$RowLevelPermissionDataSet": "

    Row-level security configuration on the dataset that you want to create.

    ", + "DataSet$RowLevelPermissionDataSet": "

    Row-level security configuration on the dataset.

    ", + "DataSetSummary$RowLevelPermissionDataSet": "

    Row-level security configuration on the dataset.

    ", + "UpdateDataSetRequest$RowLevelPermissionDataSet": "

    Row-level security configuration on the dataset that you want to update.

    " + } + }, + "RowLevelPermissionPolicy": { + "base": null, + "refs": { + "RowLevelPermissionDataSet$PermissionPolicy": "

    Permission policy.

    " + } + }, + "S3Bucket": { + "base": null, + "refs": { + "ManifestFileLocation$Bucket": "

    Amazon S3 bucket.

    " + } + }, + "S3Key": { + "base": null, + "refs": { + "ManifestFileLocation$Key": "

    Amazon S3 key that identifies an object.

    " + } + }, + "S3Parameters": { + "base": "

    S3 parameters.

    ", + "refs": { + "DataSourceParameters$S3Parameters": "

    S3 parameters.

    " + } + }, + "S3Source": { + "base": "

    A physical table type for an S3 data source.

    ", + "refs": { + "PhysicalTable$S3Source": "

    A physical table type for an S3 data source.

    " + } + }, + "ServiceNowParameters": { + "base": "

    ServiceNow parameters.

    ", + "refs": { + "DataSourceParameters$ServiceNowParameters": "

    ServiceNow parameters.

    " + } + }, + "SessionLifetimeInMinutes": { + "base": null, + "refs": { + "GetDashboardEmbedUrlRequest$SessionLifetimeInMinutes": "

    How many minutes the session is valid. The session lifetime must be between 15 and 600 minutes.

    " + } + }, + "SessionLifetimeInMinutesInvalidException": { + "base": "

    The number of minutes specified for the lifetime of a session is not valid. The session lifetime must be from 15 to 600 minutes.

    ", + "refs": { + } + }, + "SheetControlsOption": { + "base": "

    Sheet controls option.

    ", + "refs": { + "DashboardPublishOptions$SheetControlsOption": "

    Sheet controls option.

    " + } + }, + "SiteBaseUrl": { + "base": null, + "refs": { + "JiraParameters$SiteBaseUrl": "

    The base URL of the Jira site.

    ", + "ServiceNowParameters$SiteBaseUrl": "

    The base URL of the ServiceNow site.

    " + } + }, + "SnowflakeParameters": { + "base": "

    Snowflake parameters.

    ", + "refs": { + "DataSourceParameters$SnowflakeParameters": "

    Snowflake parameters.

    " + } + }, + "SparkParameters": { + "base": "

    Spark parameters.

    ", + "refs": { + "DataSourceParameters$SparkParameters": "

    Spark parameters.

    " + } + }, + "SqlQuery": { + "base": null, + "refs": { + "CustomSql$SqlQuery": "

    The SQL query.

    " + } + }, + "SqlServerParameters": { + "base": "

    SQL Server parameters.

    ", + "refs": { + "DataSourceParameters$SqlServerParameters": "

    SQL Server parameters.

    " + } + }, + "SslProperties": { + "base": "

    SSL properties that apply when QuickSight connects to your underlying data source.

    ", + "refs": { + "CreateDataSourceRequest$SslProperties": "

    SSL properties that apply when QuickSight connects to your underlying source.

    ", + "DataSource$SslProperties": "

    SSL properties that apply when QuickSight connects to your underlying source.

    ", + "UpdateDataSourceRequest$SslProperties": "

    SSL properties that apply when QuickSight connects to your underlying source.

    " + } + }, + "StatusCode": { + "base": null, + "refs": { + "CancelIngestionResponse$Status": "

    The http status of the request.

    ", + "CreateDashboardResponse$Status": "

    The http status of the request.

    ", + "CreateDataSetResponse$Status": "

    The http status of the request.

    ", + "CreateDataSourceResponse$Status": "

    The http status of the request.

    ", + "CreateGroupMembershipResponse$Status": "

    The http status of the request.

    ", + "CreateGroupResponse$Status": "

    The http status of the request.

    ", + "CreateIAMPolicyAssignmentResponse$Status": "

    The http status of the request.

    ", + "CreateIngestionResponse$Status": "

    The http status of the request.

    ", + "CreateTemplateAliasResponse$Status": "

    The http status of the request.

    ", + "CreateTemplateResponse$Status": "

    The http status of the request.

    ", + "DeleteDashboardResponse$Status": "

    The http status of the request.

    ", + "DeleteDataSetResponse$Status": "

    The http status of the request.

    ", + "DeleteDataSourceResponse$Status": "

    The http status of the request.

    ", + "DeleteGroupMembershipResponse$Status": "

    The http status of the request.

    ", + "DeleteGroupResponse$Status": "

    The http status of the request.

    ", + "DeleteIAMPolicyAssignmentResponse$Status": "

    The http status of the request.

    ", + "DeleteTemplateAliasResponse$Status": "

    The http status of the request.

    ", + "DeleteTemplateResponse$Status": "

    The http status of the request.

    ", + "DeleteUserByPrincipalIdResponse$Status": "

    The http status of the request.

    ", + "DeleteUserResponse$Status": "

    The http status of the request.

    ", + "DescribeDashboardPermissionsResponse$Status": "

    The http status of the request.

    ", + "DescribeDashboardResponse$Status": "

    The http status of this request.

    ", + "DescribeDataSetPermissionsResponse$Status": "

    The http status of the request.

    ", + "DescribeDataSetResponse$Status": "

    The http status of the request.

    ", + "DescribeDataSourcePermissionsResponse$Status": "

    The http status of the request.

    ", + "DescribeDataSourceResponse$Status": "

    The http status of the request.

    ", + "DescribeGroupResponse$Status": "

    The http status of the request.

    ", + "DescribeIAMPolicyAssignmentResponse$Status": "

    The http status of the request.

    ", + "DescribeIngestionResponse$Status": "

    The http status of the request.

    ", + "DescribeTemplateAliasResponse$Status": "

    The http status of the request.

    ", + "DescribeTemplatePermissionsResponse$Status": "

    The http status of the request.

    ", + "DescribeTemplateResponse$Status": "

    The http status of the request.

    ", + "DescribeUserResponse$Status": "

    The http status of the request.

    ", + "GetDashboardEmbedUrlResponse$Status": "

    The http status of the request.

    ", + "ListDashboardVersionsResponse$Status": "

    The http status of the request.

    ", + "ListDashboardsResponse$Status": "

    The http status of the request.

    ", + "ListDataSetsResponse$Status": "

    The http status of the request.

    ", + "ListDataSourcesResponse$Status": "

    The http status of the request.

    ", + "ListGroupMembershipsResponse$Status": "

    The http status of the request.

    ", + "ListGroupsResponse$Status": "

    The http status of the request.

    ", + "ListIAMPolicyAssignmentsForUserResponse$Status": "

    The http status of the request.

    ", + "ListIAMPolicyAssignmentsResponse$Status": "

    The http status of the request.

    ", + "ListIngestionsResponse$Status": "

    The http status of the request.

    ", + "ListTagsForResourceResponse$Status": "

    The http status of the request.

    ", + "ListTemplateAliasesResponse$Status": "

    The http status of the request.

    ", + "ListTemplateVersionsResponse$Status": "

    The http status of the request.

    ", + "ListTemplatesResponse$Status": "

    The http status of the request.

    ", + "ListUserGroupsResponse$Status": "

    The HTTP status of the request.

    ", + "ListUsersResponse$Status": "

    The http status of the request.

    ", + "RegisterUserResponse$Status": "

    The http status of the request.

    ", + "TagResourceResponse$Status": "

    The http status of the request.

    ", + "UntagResourceResponse$Status": "

    The http status of the request.

    ", + "UpdateDashboardPermissionsResponse$Status": "

    The http status of the request.

    ", + "UpdateDashboardPublishedVersionResponse$Status": "

    The http status of the request.

    ", + "UpdateDashboardResponse$Status": "

    The http status of the request.

    ", + "UpdateDataSetPermissionsResponse$Status": "

    The http status of the request.

    ", + "UpdateDataSetResponse$Status": "

    The http status of the request.

    ", + "UpdateDataSourcePermissionsResponse$Status": "

    The http status of the request.

    ", + "UpdateDataSourceResponse$Status": "

    The http status of the request.

    ", + "UpdateGroupResponse$Status": "

    The http status of the request.

    ", + "UpdateIAMPolicyAssignmentResponse$Status": "

    The http status of the request.

    ", + "UpdateTemplateAliasResponse$Status": "

    The http status of the request.

    ", + "UpdateTemplatePermissionsResponse$Status": "

    The http status of the request.

    ", + "UpdateTemplateResponse$Status": "

    The http status of the request.

    ", + "UpdateUserResponse$Status": "

    The http status of the request.

    " } }, "String": { @@ -409,20 +2196,55 @@ "refs": { "AccessDeniedException$Message": null, "AccessDeniedException$RequestId": "

    The AWS request id for this request.

    ", + "ActionList$member": null, + "ColumnGroupColumnSchema$Name": "

    The name of the column group's column schema.

    ", + "ColumnGroupSchema$Name": "

    The name of the column group schema.

    ", + "ColumnSchema$Name": "

    The name of the column schema.

    ", + "ColumnSchema$DataType": "

    The data type of the column schema.

    ", + "ColumnSchema$GeographicRole": "

    The geographic role of the column schema.

    ", + "ConcurrentUpdatingException$Message": null, + "ConcurrentUpdatingException$RequestId": null, + "ConflictException$Message": null, + "ConflictException$RequestId": "

    The AWS request ID for this request.

    ", + "CreateDashboardResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateDataSetResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateDataSourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", "CreateGroupMembershipResponse$RequestId": "

    The AWS request ID for this operation.

    ", "CreateGroupResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateIAMPolicyAssignmentResponse$AssignmentId": "

    An ID for the assignment.

    ", + "CreateIAMPolicyAssignmentResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateTemplateAliasResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateTemplateResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DataSetConfiguration$Placeholder": "

    Placeholder.

    ", + "DataSourceErrorInfo$Message": "

    Error message.

    ", + "DeleteDashboardResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DeleteDataSetResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DeleteDataSourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DeleteGroupMembershipResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DeleteGroupResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DeleteIAMPolicyAssignmentResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DeleteTemplateAliasResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DeleteTemplateResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DeleteUserByPrincipalIdRequest$PrincipalId": "

    The principal ID of the user.

    ", "DeleteUserByPrincipalIdResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DeleteUserResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDashboardPermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDashboardResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDataSetPermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDataSetResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDataSourcePermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeDataSourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DescribeGroupResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeIAMPolicyAssignmentResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeTemplateAliasResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeTemplatePermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DescribeUserResponse$RequestId": "

    The AWS request ID for this operation.

    ", "DomainNotWhitelistedException$Message": null, "DomainNotWhitelistedException$RequestId": "

    The AWS request ID for this request.

    ", - "GetDashboardEmbedUrlRequest$DashboardId": "

    The ID for the dashboard, also added to IAM policy

    ", "GetDashboardEmbedUrlResponse$RequestId": "

    The AWS request ID for this operation.

    ", "Group$PrincipalId": "

    The principal ID of the group.

    ", + "IAMPolicyAssignment$AssignmentId": "

    Assignment ID.

    ", + "IdentityMap$key": null, "IdentityTypeNotSupportedException$Message": null, "IdentityTypeNotSupportedException$RequestId": "

    The AWS request ID for this request.

    ", "InternalFailureException$Message": null, @@ -433,12 +2255,40 @@ "InvalidParameterValueException$RequestId": "

    The AWS request ID for this request.

    ", "LimitExceededException$Message": null, "LimitExceededException$RequestId": "

    The AWS request ID for this request.

    ", + "ListDashboardVersionsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDashboardVersionsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDashboardVersionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListDashboardsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.
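    (Editor's note: the NextToken fields in these list operations all describe the same paginate-until-empty contract. The Go sketch below illustrates that loop only; `page` and `listDashboardsPage` are hypothetical stand-ins for whichever QuickSight List* call and response type you use, not SDK identifiers.)

```go
package main

import (
	"context"
	"fmt"
)

// page models the items-plus-NextToken shape shared by the List* responses
// described above. It is a stand-in, not an SDK type.
type page struct {
	Items     []string
	NextToken *string
}

// listDashboardsPage is a hypothetical single-page call; real code would
// issue one List request with its NextToken input set to token.
func listDashboardsPage(ctx context.Context, token *string) (page, error) {
	return page{}, nil
}

// listAll drains every page by feeding each response's NextToken back into
// the next request and stopping when the token comes back empty.
func listAll(ctx context.Context) ([]string, error) {
	var all []string
	var token *string
	for {
		p, err := listDashboardsPage(ctx, token)
		if err != nil {
			return nil, err
		}
		all = append(all, p.Items...)
		if p.NextToken == nil || *p.NextToken == "" {
			return all, nil // no more results
		}
		token = p.NextToken
	}
}

func main() {
	items, err := listAll(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(items), "items")
}
```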

    ", + "ListDashboardsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDashboardsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListDataSetsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDataSetsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDataSetsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListDataSourcesRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDataSourcesResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListDataSourcesResponse$RequestId": "

    The AWS request ID for this operation.

    ", "ListGroupMembershipsRequest$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListGroupMembershipsResponse$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListGroupMembershipsResponse$RequestId": "

    The AWS request ID for this operation.

    ", "ListGroupsRequest$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListGroupsResponse$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListGroupsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListIAMPolicyAssignmentsForUserRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIAMPolicyAssignmentsForUserResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListIAMPolicyAssignmentsForUserResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIAMPolicyAssignmentsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIAMPolicyAssignmentsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIAMPolicyAssignmentsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListTagsForResourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListTemplateAliasesRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplateAliasesResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListTemplateAliasesResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplateVersionsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplateVersionsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplateVersionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ListTemplatesRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplatesResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListTemplatesResponse$RequestId": "

    The AWS request ID for this operation.

    ", "ListUserGroupsRequest$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListUserGroupsResponse$NextToken": "

    A pagination token that can be used in a subsequent request.

    ", "ListUserGroupsResponse$RequestId": "

    The AWS request ID for this operation.

    ", @@ -447,6 +2297,7 @@ "ListUsersResponse$RequestId": "

    The AWS request ID for this operation.

    ", "PreconditionNotMetException$Message": null, "PreconditionNotMetException$RequestId": "

    The AWS request ID for this request.

    ", + "ProjectedColumnList$member": null, "QuickSightUserNotFoundException$Message": null, "QuickSightUserNotFoundException$RequestId": "

    The AWS request ID for this request.

    ", "RegisterUserRequest$Email": "

    The email address of the user that you want to register.

    ", @@ -461,27 +2312,357 @@ "ResourceUnavailableException$RequestId": "

    The AWS request ID for this request.

    ", "SessionLifetimeInMinutesInvalidException$Message": null, "SessionLifetimeInMinutesInvalidException$RequestId": "

    The AWS request ID for this request.

    ", + "StringList$member": null, + "TagResourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", "ThrottlingException$Message": null, "ThrottlingException$RequestId": "

    The AWS request ID for this request.

    ", "UnsupportedUserEditionException$Message": null, "UnsupportedUserEditionException$RequestId": "

    The AWS request ID for this request.

    ", + "UntagResourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDashboardPermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDashboardPublishedVersionResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDashboardResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDataSetPermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDataSetResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDataSourcePermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateDataSourceResponse$RequestId": "

    The AWS request ID for this operation.

    ", "UpdateGroupResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateIAMPolicyAssignmentResponse$AssignmentId": "

    The ID of the assignment.

    ", + "UpdateIAMPolicyAssignmentResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateTemplateAliasResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateTemplatePermissionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "UpdateTemplateResponse$RequestId": "

    The AWS request ID for this operation.

    ", "UpdateUserRequest$Email": "

    The email address of the user that you want to update.

    ", "UpdateUserResponse$RequestId": "

    The AWS request ID for this operation.

    ", "User$Email": "

    The user's email address.

    ", "User$PrincipalId": "

    The principal ID of the user.

    " } }, + "StringList": { + "base": null, + "refs": { + "StringParameter$Values": "

    Values.

    " + } + }, + "StringParameter": { + "base": "

    String parameter.

    ", + "refs": { + "StringParameterList$member": null + } + }, + "StringParameterList": { + "base": null, + "refs": { + "Parameters$StringParameters": "

    String parameters.

    " + } + }, + "Tag": { + "base": "

    The keys of the key-value pairs for the resource tag or tags assigned to the resource.

    ", + "refs": { + "TagList$member": null + } + }, + "TagColumnOperation": { + "base": "

    A transform operation that tags a column with additional information.

    ", + "refs": { + "TransformOperation$TagColumnOperation": "

    An operation that tags a column with additional information.

    " + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    Tag key.

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "

    The keys of the key-value pairs for the resource tag or tags assigned to the resource.

    " + } + }, + "TagList": { + "base": null, + "refs": { + "CreateDashboardRequest$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the dashboard.

    ", + "CreateDataSetRequest$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the dataset.

    ", + "CreateDataSourceRequest$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the data source.

    ", + "CreateTemplateRequest$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

    ", + "ListTagsForResourceResponse$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

    ", + "TagResourceRequest$Tags": "

    Contains a map of the key-value pairs for the resource tag or tags assigned to the resource.

    " + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    Tag value.

    " + } + }, + "Template": { + "base": "

    A template object. A template is an entity in QuickSight that encapsulates the metadata required to create an analysis that can be used to create a dashboard. It adds a layer of abstraction by replacing the dataset associated with the analysis with placeholders. Templates can be used to create dashboards by replacing dataset placeholders with datasets that follow the same schema that was used to create the source analysis and template.

    You can share templates across AWS accounts by allowing users in other AWS accounts to create a template or a dashboard from an existing template.

    ", + "refs": { + "DescribeTemplateResponse$Template": "

    The template structure of the object you want to describe.

    " + } + }, + "TemplateAlias": { + "base": "

    The template alias.

    ", + "refs": { + "CreateTemplateAliasResponse$TemplateAlias": "

    Information on the template alias.

    ", + "DescribeTemplateAliasResponse$TemplateAlias": "

    Information about the template alias.

    ", + "TemplateAliasList$member": null, + "UpdateTemplateAliasResponse$TemplateAlias": "

    The template alias.

    " + } + }, + "TemplateAliasList": { + "base": null, + "refs": { + "ListTemplateAliasesResponse$TemplateAliasList": "

    A structure containing the list of template aliases.

    " + } + }, + "TemplateError": { + "base": "

    List of errors that occurred when the template version creation failed.

    ", + "refs": { + "TemplateErrorList$member": null + } + }, + "TemplateErrorList": { + "base": null, + "refs": { + "TemplateVersion$Errors": "

    Errors associated with the template.

    " + } + }, + "TemplateErrorType": { + "base": null, + "refs": { + "TemplateError$Type": "

    Type of error.

    " + } + }, + "TemplateName": { + "base": null, + "refs": { + "CreateTemplateRequest$Name": "

    A display name for the template.

    ", + "Template$Name": "

    The display name of the template.

    ", + "TemplateSummary$Name": "

    A display name for the template.

    ", + "UpdateTemplateRequest$Name": "

    The name for the template.

    " + } + }, + "TemplateSourceAnalysis": { + "base": "

    The source analysis of the template.

    ", + "refs": { + "TemplateSourceEntity$SourceAnalysis": "

    The source analysis, if it is based on an analysis.

    " + } + }, + "TemplateSourceEntity": { + "base": "

    The source entity of the template.

    ", + "refs": { + "CreateTemplateRequest$SourceEntity": "

    The ARN of the source entity from which this template is being created. Templates can currently be created from an analysis or another template. If the ARN is for an analysis, you must include its dataset references.

    ", + "UpdateTemplateRequest$SourceEntity": "

    The source QuickSight entity from which this template is being created. Templates can currently be created from an analysis or another template.

    " + } + }, + "TemplateSourceTemplate": { + "base": "

    The source template of the template.

    ", + "refs": { + "TemplateSourceEntity$SourceTemplate": "

    The source template, if it is based on a template.

    " + } + }, + "TemplateSummary": { + "base": "

    The template summary.

    ", + "refs": { + "TemplateSummaryList$member": null + } + }, + "TemplateSummaryList": { + "base": null, + "refs": { + "ListTemplatesResponse$TemplateSummaryList": "

    A structure containing information about the templates in the list.

    " + } + }, + "TemplateVersion": { + "base": "

    A version of a template.

    ", + "refs": { + "Template$Version": "

    A structure describing the versions of the template.

    " + } + }, + "TemplateVersionSummary": { + "base": "

    The template version.

    ", + "refs": { + "TemplateVersionSummaryList$member": null + } + }, + "TemplateVersionSummaryList": { + "base": null, + "refs": { + "ListTemplateVersionsResponse$TemplateVersionSummaryList": "

    A structure containing a list of all the versions of the specified template.

    " + } + }, + "TeradataParameters": { + "base": "

    Teradata parameters.

    ", + "refs": { + "DataSourceParameters$TeradataParameters": "

    Teradata parameters.

    " + } + }, + "TextQualifier": { + "base": null, + "refs": { + "UploadSettings$TextQualifier": "

    Text qualifier.

    " + } + }, "ThrottlingException": { "base": "

    Access is throttled.

    ", "refs": { } }, + "Timestamp": { + "base": null, + "refs": { + "Dashboard$CreatedTime": "

    The time this was created.

    ", + "Dashboard$LastPublishedTime": "

    The last time this was published.

    ", + "Dashboard$LastUpdatedTime": "

    The last time this was updated.

    ", + "DashboardSummary$CreatedTime": "

    The time this was created.

    ", + "DashboardSummary$LastUpdatedTime": "

    The last time this was updated.

    ", + "DashboardSummary$LastPublishedTime": "

    The last time this was published.

    ", + "DashboardVersion$CreatedTime": "

    The time this was created.

    ", + "DashboardVersionSummary$CreatedTime": "

    The time this was created.

    ", + "DataSet$CreatedTime": "

    The time this was created.

    ", + "DataSet$LastUpdatedTime": "

    The last time this was updated.

    ", + "DataSetSummary$CreatedTime": "

    The time this was created.

    ", + "DataSetSummary$LastUpdatedTime": "

    The last time this was updated.

    ", + "DataSource$CreatedTime": "

    The time this was created.

    ", + "DataSource$LastUpdatedTime": "

    The last time this was updated.

    ", + "Template$LastUpdatedTime": "

    Time when this was last updated.

    ", + "Template$CreatedTime": "

    Time when this was created.

    ", + "TemplateSummary$CreatedTime": "

    The time this was created.

    ", + "TemplateSummary$LastUpdatedTime": "

    The last time this was updated.

    ", + "TemplateVersion$CreatedTime": "

    The time this was created.

    ", + "TemplateVersionSummary$CreatedTime": "

    The time this was created.

    ", + "TimestampList$member": null + } + }, + "TimestampList": { + "base": null, + "refs": { + "DateTimeParameter$Values": "

    Values.

    " + } + }, + "TransformOperation": { + "base": "

    A data transformation on a logical table. This is a variant type structure. No more than one of the attributes should be non-null for this structure to be valid.

    ", + "refs": { + "TransformOperationList$member": null + } + }, + "TransformOperationList": { + "base": null, + "refs": { + "LogicalTable$DataTransforms": "

    Transform operations that act on this logical table.

    " + } + }, + "TwitterParameters": { + "base": "

    Twitter parameters.

    ", + "refs": { + "DataSourceParameters$TwitterParameters": "

    Twitter parameters.

    " + } + }, + "TypeCastFormat": { + "base": null, + "refs": { + "CastColumnTypeOperation$Format": "

    When casting a column from string to datetime type, you can supply a QuickSight supported format string to denote the source data format.

    " + } + }, "UnsupportedUserEditionException": { "base": "

    This error indicates that you are calling an operation on an Amazon QuickSight subscription where the edition doesn't include support for that operation. Amazon QuickSight currently has Standard Edition and Enterprise Edition. Not every operation and capability is available in every edition.

    ", "refs": { } }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPublishedVersionRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardPublishedVersionResponse": { + "base": null, + "refs": { + } + }, + "UpdateDashboardRequest": { + "base": null, + "refs": { + } + }, + "UpdateDashboardResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSetPermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSetPermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSetRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSetResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSourcePermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSourcePermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceRequest": { + "base": null, + "refs": { + } + }, + "UpdateDataSourceResponse": { + "base": null, + "refs": { + } + }, "UpdateGroupRequest": { "base": null, "refs": { @@ -492,6 +2673,55 @@ "refs": { } }, + "UpdateIAMPolicyAssignmentRequest": { + "base": null, + "refs": { + } + }, + "UpdateIAMPolicyAssignmentResponse": { + "base": null, + "refs": { + } + }, + "UpdateResourcePermissionList": { + "base": null, + "refs": { + "UpdateDashboardPermissionsRequest$GrantPermissions": "

    The permissions that you want to grant on this resource.

    ", + "UpdateDashboardPermissionsRequest$RevokePermissions": "

    The permissions that you want to revoke from this resource.

    ", + "UpdateTemplatePermissionsRequest$GrantPermissions": "

    A list of resource permissions to be granted on the template. The following example shows the shorthand syntax:

    Shorthand Syntax: Principal=string,Actions=string,string ...

    ", + "UpdateTemplatePermissionsRequest$RevokePermissions": "

    A list of resource permissions to be revoked from the template. Shorthand syntax: Principal=string,Actions=string,string ...

    " + } + }, + "UpdateTemplateAliasRequest": { + "base": null, + "refs": { + } + }, + "UpdateTemplateAliasResponse": { + "base": null, + "refs": { + } + }, + "UpdateTemplatePermissionsRequest": { + "base": null, + "refs": { + } + }, + "UpdateTemplatePermissionsResponse": { + "base": null, + "refs": { + } + }, + "UpdateTemplateRequest": { + "base": null, + "refs": { + } + }, + "UpdateTemplateResponse": { + "base": null, + "refs": { + } + }, "UpdateUserRequest": { "base": null, "refs": { @@ -502,6 +2732,12 @@ "refs": { } }, + "UploadSettings": { + "base": "

    Information about the format of the source file(s).

    ", + "refs": { + "S3Source$UploadSettings": "

    Information about the format of the S3 source file(s).

    " + } + }, "User": { "base": "

    A registered user of Amazon QuickSight. Currently, an Amazon QuickSight subscription can't contain more than 20 million users.

    ", "refs": { @@ -522,6 +2758,7 @@ "refs": { "DeleteUserRequest$UserName": "

    The name of the user that you want to delete.

    ", "DescribeUserRequest$UserName": "

    The name of the user that you want to describe.

    ", + "ListIAMPolicyAssignmentsForUserRequest$UserName": "

    The name of the user.

    ", "ListUserGroupsRequest$UserName": "

    The Amazon QuickSight user name that you want to list group memberships for.

    ", "RegisterUserRequest$UserName": "

    The Amazon QuickSight user name that you want to create for the user you are registering.

    ", "UpdateUserRequest$UserName": "

    The Amazon QuickSight user name that you want to update.

    ", @@ -531,9 +2768,67 @@ "UserRole": { "base": null, "refs": { - "RegisterUserRequest$UserRole": "

    The Amazon QuickSight role of the user. The user role can be one of the following:

    • READER: A user who has read-only access to dashboards.

    • AUTHOR: A user who can create data sources, data sets, analyses, and dashboards.

    • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

    ", - "UpdateUserRequest$Role": "

    The Amazon QuickSight role of the user. The user role can be one of the following:

    • READER: A user who has read-only access to dashboards.

    • AUTHOR: A user who can create data sources, data sets, analyses, and dashboards.

    • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

    ", - "User$Role": "

    The Amazon QuickSight role for the user.

    " + "RegisterUserRequest$UserRole": "

    The Amazon QuickSight role for the user. The user role can be one of the following:

    • READER: A user who has read-only access to dashboards.

    • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

    • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

    • RESTRICTED_READER: This role isn't currently available for use.

    • RESTRICTED_AUTHOR: This role isn't currently available for use.

    ", + "UpdateUserRequest$Role": "

    The Amazon QuickSight role of the user. The user role can be one of the following:

    • READER: A user who has read-only access to dashboards.

    • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

    • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

    ", + "User$Role": "

    The Amazon QuickSight role for the user. The user role can be one of the following:

    • READER: A user who has read-only access to dashboards.

    • AUTHOR: A user who can create data sources, datasets, analyses, and dashboards.

    • ADMIN: A user who is an author, who can also manage Amazon QuickSight settings.

    • RESTRICTED_READER: This role isn't currently available for use.

    • RESTRICTED_AUTHOR: This role isn't currently available for use.

    " + } + }, + "Username": { + "base": null, + "refs": { + "CredentialPair$Username": "

    Username.

    " + } + }, + "VersionDescription": { + "base": null, + "refs": { + "CreateDashboardRequest$VersionDescription": "

    A description for the first version of the dashboard being created.

    ", + "CreateTemplateRequest$VersionDescription": "

    A description of the current template version being created. This API creates the first version of the template. Every time UpdateTemplate is called, a new version is created. Each version of the template maintains a description of the version in the VersionDescription field.

    ", + "DashboardVersion$Description": "

    Description.

    ", + "DashboardVersionSummary$Description": "

    Description.

    ", + "TemplateVersion$Description": "

    The description of the template.

    ", + "TemplateVersionSummary$Description": "

    The description of the template version.

    ", + "UpdateDashboardRequest$VersionDescription": "

    A description for the first version of the dashboard being created.

    ", + "UpdateTemplateRequest$VersionDescription": "

    A description of the current template version being updated. Every time UpdateTemplate is called, a new version is created. Each version of the template maintains a description of the version in the VersionDescription field.

    " + } + }, + "VersionNumber": { + "base": null, + "refs": { + "CreateTemplateAliasRequest$TemplateVersionNumber": "

    The version number of the template.

    ", + "DashboardSummary$PublishedVersionNumber": "

    Published version number.

    ", + "DashboardVersion$VersionNumber": "

    Version number.

    ", + "DashboardVersionSummary$VersionNumber": "

    Version number.

    ", + "DeleteDashboardRequest$VersionNumber": "

    The version number of the dashboard. If the version number property is provided, only the specified version of the dashboard is deleted.

    ", + "DeleteTemplateRequest$VersionNumber": "

    The version number.

    ", + "DescribeDashboardRequest$VersionNumber": "

    The version number for the dashboard. If a version number isn't passed, the latest published dashboard version is described.

    ", + "DescribeTemplateRequest$VersionNumber": "

    This is an optional field. When a version number is provided, the corresponding version is described; if it isn't provided, the latest version of the template is described.

    ", + "TemplateAlias$TemplateVersionNumber": "

    The version number of the template alias.

    ", + "TemplateSummary$LatestVersionNumber": "

    The latest version number for the template summary.

    ", + "TemplateVersion$VersionNumber": "

    The version number of the template.

    ", + "TemplateVersionSummary$VersionNumber": "

    The version number of the template version.

    ", + "UpdateDashboardPublishedVersionRequest$VersionNumber": "

    The version number of the dashboard.

    ", + "UpdateTemplateAliasRequest$TemplateVersionNumber": "

    The version number of the template.

    " + } + }, + "VpcConnectionProperties": { + "base": "

    VPC connection properties.

    ", + "refs": { + "CreateDataSourceRequest$VpcConnectionProperties": "

    You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.

    ", + "DataSource$VpcConnectionProperties": "

    The VPC connection information. You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.

    ", + "UpdateDataSourceRequest$VpcConnectionProperties": "

    You need to use this parameter only when you want QuickSight to use a VPC connection when connecting to your underlying source.

    " + } + }, + "Warehouse": { + "base": null, + "refs": { + "SnowflakeParameters$Warehouse": "

    Warehouse.

    " + } + }, + "WorkGroup": { + "base": null, + "refs": { + "AthenaParameters$WorkGroup": "

    The workgroup that Athena uses.

    " } }, "boolean": { @@ -542,6 +2837,39 @@ "GetDashboardEmbedUrlRequest$UndoRedoDisabled": "

    Remove the undo/redo button on the embedded dashboard. The default is FALSE, which enables the undo/redo button.

    ", "GetDashboardEmbedUrlRequest$ResetDisabled": "

    Remove the reset button on the embedded dashboard. The default is FALSE, which allows the reset button.

    " } + }, + "long": { + "base": null, + "refs": { + "Ingestion$IngestionTimeInSeconds": "

    The time this ingestion took, measured in seconds.

    ", + "Ingestion$IngestionSizeInBytes": "

    Size of the data ingested in bytes.

    ", + "RowInfo$RowsIngested": "

    The number of rows that were ingested.

    ", + "RowInfo$RowsDropped": "

    The number of rows that were not ingested.

    " + } + }, + "string": { + "base": null, + "refs": { + "CancelIngestionRequest$DataSetId": "

    The ID of the dataset used in the ingestion.

    ", + "CancelIngestionResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "CreateIngestionRequest$DataSetId": "

    The ID of the dataset used in the ingestion.

    ", + "CreateIngestionResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "DescribeIngestionRequest$DataSetId": "

    The ID of the dataset used in the ingestion.

    ", + "DescribeIngestionResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "ErrorInfo$Message": "

    Error message.

    ", + "ListIngestionsRequest$DataSetId": "

    The ID of the dataset used in the ingestion.

    ", + "ListIngestionsRequest$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIngestionsResponse$NextToken": "

    The token for the next set of results, or null if there are no more results.

    ", + "ListIngestionsResponse$RequestId": "

    The AWS request ID for this operation.

    ", + "QueueInfo$WaitingOnIngestion": "

    The ID of the queued ingestion.

    ", + "QueueInfo$QueuedIngestion": "

    The ID of the ongoing ingestion. The queued ingestion is waiting for the ongoing ingestion to complete.

    " + } + }, + "timestamp": { + "base": null, + "refs": { + "Ingestion$CreatedTime": "

    The time this ingestion started.

    " + } } } } diff --git a/models/apis/quicksight/2018-04-01/paginators-1.json b/models/apis/quicksight/2018-04-01/paginators-1.json index 5677bd8e4a2..31fce46f122 100644 --- a/models/apis/quicksight/2018-04-01/paginators-1.json +++ b/models/apis/quicksight/2018-04-01/paginators-1.json @@ -1,4 +1,44 @@ { "pagination": { + "ListDashboardVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDashboards": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDataSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDataSources": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListIngestions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplateAliases": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplateVersions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTemplates": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + } } } diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 199900dbcb5..2fb87a97ab7 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -25,7 +25,7 @@ "CreateDBSecurityGroup": "

    Creates a new DB security group. DB security groups control access to a DB instance.

    A DB security group controls access to EC2-Classic DB instances that are not in a VPC.

    ", "CreateDBSnapshot": "

    Creates a DBSnapshot. The source DBInstance must be in the \"available\" state.

    ", "CreateDBSubnetGroup": "

    Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

    ", - "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType nor the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.

    ", + "CreateEventSubscription": "

    Creates an RDS event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the RDS console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.

    You can specify the type of source (SourceType) you want to be notified of, provide a list of RDS sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.

    If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your RDS sources. If you do not specify either the SourceType or the SourceIdentifier, you are notified of events generated from all RDS sources belonging to your customer account.

    RDS event notification is only available for unencrypted SNS topics. If you specify an encrypted SNS topic, event notifications aren't sent for the topic.

    ", "CreateGlobalCluster": "

    Creates an Aurora global database spread across multiple regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem.

    You can create a global database that is initially empty, and then add a primary cluster and a secondary cluster to it. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database.

    This action only applies to Aurora DB clusters.

    ", "CreateOptionGroup": "

    Creates a new option group. You can create up to 20 option groups.

    ", "DeleteCustomAvailabilityZone": "

    Deletes a custom Availability Zone (AZ).

    A custom AZ is an on-premises AZ that is integrated with a VMware vSphere cluster.

    For more information about RDS on VMware, see the RDS on VMware User Guide.

    ", @@ -89,7 +89,7 @@ "ModifyDBClusterSnapshotAttribute": "

    Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.

    To share a manual DB cluster snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value all to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

    To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the DescribeDBClusterSnapshotAttributes API action.

    This action only applies to Aurora DB clusters.

    ", "ModifyDBInstance": "

    Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call DescribeValidDBInstanceModifications before you call ModifyDBInstance.

    ", "ModifyDBParameterGroup": "

    Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: ParameterName, ParameterValue, and ApplyMethod. A maximum of 20 parameters can be modified in a single request.

    Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.

    After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon RDS to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the character_set_database parameter. You can use the Parameter Groups option of the Amazon RDS console or the DescribeDBParameters command to verify that your DB parameter group has been created or modified.

    ", - "ModifyDBSnapshot": "

    Updates a manual DB snapshot, which can be encrypted or not encrypted, with a new engine version.

    Amazon RDS supports upgrading DB snapshots for MySQL and Oracle.

    ", + "ModifyDBSnapshot": "

    Updates a manual DB snapshot, which can be encrypted or not encrypted, with a new engine version.

    Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL.

    ", "ModifyDBSnapshotAttribute": "

    Adds an attribute and values to, or removes an attribute and values from, a manual DB snapshot.

    To share a manual DB snapshot with other AWS accounts, specify restore as the AttributeName and use the ValuesToAdd parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB snapshot. Use the value all to make the manual DB snapshot public, which means it can be copied or restored by all AWS accounts. Do not add the all value for any manual DB snapshots that contain private information that you don't want available to all AWS accounts. If the manual DB snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't use all as a value for that parameter in this case.

    To view which AWS accounts have access to copy or restore a manual DB snapshot, or whether a manual DB snapshot is public or private, use the DescribeDBSnapshotAttributes API action.

    ", "ModifyDBSubnetGroup": "

    Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.

    ", "ModifyEventSubscription": "

    Modifies an existing RDS event notification subscription. Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription calls.

    You can see a list of the event categories for a given SourceType in the Events topic in the Amazon RDS User Guide or by using the DescribeEventCategories action.

    ", @@ -367,7 +367,7 @@ "CreateDBInstanceMessage$CopyTagsToSnapshot": "

    A value that indicates whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.

    Amazon Aurora

    Not applicable. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.

    ", "CreateDBInstanceMessage$EnableIAMDatabaseAuthentication": "

    A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.

    You can enable IAM database authentication for the following database engines:

    Amazon Aurora

    Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.

    MySQL

    • For MySQL 5.6, minor version 5.6.34 or higher

    • For MySQL 5.7, minor version 5.7.16 or higher

    • For MySQL 8.0, minor version 8.0.16 or higher

    PostgreSQL

    • For PostgreSQL 9.5, minor version 9.5.15 or higher

    • For PostgreSQL 9.6, minor version 9.6.11 or higher

    • PostgreSQL 10.6, 10.7, and 10.9

    For more information, see IAM Database Authentication for MySQL and PostgreSQL in the Amazon RDS User Guide.

    ", "CreateDBInstanceMessage$EnablePerformanceInsights": "

    A value that indicates whether to enable Performance Insights for the DB instance.

    For more information, see Using Amazon Performance Insights in the Amazon Relational Database Service User Guide.

    ", - "CreateDBInstanceMessage$DeletionProtection": "

    A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.

    ", + "CreateDBInstanceMessage$DeletionProtection": "

    A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see Deleting a DB Instance.

    Amazon Aurora

    Not applicable. You can enable or disable deletion protection for the DB cluster. For more information, see CreateDBCluster. DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.

    ", "CreateDBInstanceReadReplicaMessage$MultiAZ": "

    A value that indicates whether the Read Replica is in a Multi-AZ deployment.

    You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.

    ", "CreateDBInstanceReadReplicaMessage$AutoMinorVersionUpgrade": "

    A value that indicates whether minor engine upgrades are applied automatically to the Read Replica during the maintenance window.

    Default: Inherits from the source DB instance

    ", "CreateDBInstanceReadReplicaMessage$PubliclyAccessible": "

    A value that indicates whether the DB instance is publicly accessible. When the DB instance is publicly accessible, it is an Internet-facing instance with a publicly resolvable DNS name, which resolves to a public IP address. When the DB instance isn't publicly accessible, it is an internal instance with a DNS name that resolves to a private IP address. For more information, see CreateDBInstance.

    ", @@ -2058,7 +2058,7 @@ "DescribeReservedDBInstancesMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", "DescribeReservedDBInstancesOfferingsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more than the MaxRecords value is available, a pagination token called a marker is included in the response so you can retrieve the remaining results.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", "DescribeSourceRegionsMessage$MaxRecords": "

    The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

    Default: 100

    Constraints: Minimum 20, maximum 100.

    ", - "ModifyCurrentDBClusterCapacityMessage$Capacity": "

    The DB cluster capacity.

    When you change the capacity of a paused Aurora Serverless DB cluster, it automatically resumes.

    Constraints:

    • Value must be 1, 2, 4, 8, 16, 32, 64, 128, or 256.

    ", + "ModifyCurrentDBClusterCapacityMessage$Capacity": "

    The DB cluster capacity.

    When you change the capacity of a paused Aurora Serverless DB cluster, it automatically resumes.

    Constraints:

    • For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

    • For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

    ", "ModifyCurrentDBClusterCapacityMessage$SecondsBeforeTimeout": "

    The amount of time, in seconds, that Aurora Serverless tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.

    • Value must be from 10 through 600.

    ", "ModifyDBClusterMessage$BackupRetentionPeriod": "

    The number of days for which automated backups are retained. You must specify a minimum value of 1.

    Default: 1

    Constraints:

    • Must be a value from 1 to 35

    ", "ModifyDBClusterMessage$Port": "

    The port number on which the DB cluster accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB cluster.

    ", @@ -2098,8 +2098,8 @@ "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "

    The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

    ", "RestoreDBInstanceToPointInTimeMessage$Port": "

    The port number on which the database accepts connections.

    Constraints: Value must be 1150-65535

    Default: The same port as the original DB instance.

    ", "RestoreDBInstanceToPointInTimeMessage$Iops": "

    The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

    Constraints: Must be an integer greater than 1000.

    SQL Server

    Setting the IOPS value for the SQL Server database engine isn't supported.

    ", - "ScalingConfiguration$MinCapacity": "

    The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

    Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

    The minimum capacity must be less than or equal to the maximum capacity.

    ", - "ScalingConfiguration$MaxCapacity": "

    The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

    Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

    The maximum capacity must be greater than or equal to the minimum capacity.

    ", + "ScalingConfiguration$MinCapacity": "

    The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

    For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

    For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

    The minimum capacity must be less than or equal to the maximum capacity.

    ", + "ScalingConfiguration$MaxCapacity": "

    The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

    For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

    For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

    The maximum capacity must be greater than or equal to the minimum capacity.

    ", "ScalingConfiguration$SecondsUntilAutoPause": "

    The time, in seconds, before an Aurora DB cluster in serverless mode is paused.

    ", "ScalingConfigurationInfo$MinCapacity": "

    The minimum capacity for the Aurora DB cluster in serverless DB engine mode.

    ", "ScalingConfigurationInfo$MaxCapacity": "

    The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

    ", @@ -3589,7 +3589,7 @@ "ModifyDBSnapshotAttributeMessage$DBSnapshotIdentifier": "

    The identifier for the DB snapshot to modify the attributes for.

    ", "ModifyDBSnapshotAttributeMessage$AttributeName": "

    The name of the DB snapshot attribute to modify.

    To manage authorization for other AWS accounts to copy or restore a manual DB snapshot, set this value to restore.

    ", "ModifyDBSnapshotMessage$DBSnapshotIdentifier": "

    The identifier of the DB snapshot to modify.

    ", - "ModifyDBSnapshotMessage$EngineVersion": "

    The engine version to upgrade the DB snapshot to.

    The following are the database engines and engine versions that are available when you upgrade a DB snapshot.

    MySQL

    • 5.5.46 (supported for 5.1 DB snapshots)

    Oracle

    • 12.1.0.2.v8 (supported for 12.1.0.1 DB snapshots)

    • 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots)

    • 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots)

    ", + "ModifyDBSnapshotMessage$EngineVersion": "

    The engine version to upgrade the DB snapshot to.

    The following are the database engines and engine versions that are available when you upgrade a DB snapshot.

    MySQL

    • 5.5.46 (supported for 5.1 DB snapshots)

    Oracle

    • 12.1.0.2.v8 (supported for 12.1.0.1 DB snapshots)

    • 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots)

    • 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots)

    PostgreSQL

    For the list of engine versions that are available for upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS.

    ", "ModifyDBSnapshotMessage$OptionGroupName": "

    The option group to identify with the upgraded DB snapshot.

    You can specify this parameter when you upgrade an Oracle DB snapshot. The same option group considerations apply when upgrading a DB snapshot as when upgrading a DB instance. For more information, see Option Group Considerations in the Amazon RDS User Guide.

    ", "ModifyDBSubnetGroupMessage$DBSubnetGroupName": "

    The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group.

    Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

    Example: mySubnetgroup

    ", "ModifyDBSubnetGroupMessage$DBSubnetGroupDescription": "

    The description for the DB subnet group.

    ", diff --git a/models/apis/runtime.sagemaker/2017-05-13/api-2.json b/models/apis/runtime.sagemaker/2017-05-13/api-2.json index 507801756e5..4214dc72f0e 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/api-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/api-2.json @@ -37,6 +37,7 @@ "CustomAttributesHeader":{ "type":"string", "max":1024, + "pattern":"\\p{ASCII}*", "sensitive":true }, "EndpointName":{ @@ -46,7 +47,8 @@ }, "Header":{ "type":"string", - "max":1024 + "max":1024, + "pattern":"\\p{ASCII}*" }, "InternalFailure":{ "type":"structure", @@ -85,6 +87,11 @@ "shape":"CustomAttributesHeader", "location":"header", "locationName":"X-Amzn-SageMaker-Custom-Attributes" + }, + "TargetModel":{ + "shape":"TargetModelHeader", + "location":"header", + "locationName":"X-Amzn-SageMaker-Target-Model" } }, "payload":"Body" @@ -139,6 +146,12 @@ "synthetic":true }, "StatusCode":{"type":"integer"}, + "TargetModelHeader":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"\\A\\S[\\p{Print}]*\\z" + }, "ValidationError":{ "type":"structure", "members":{ diff --git a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json index 3ecd8eed7e0..07398b40bcc 100644 --- a/models/apis/runtime.sagemaker/2017-05-13/docs-2.json +++ b/models/apis/runtime.sagemaker/2017-05-13/docs-2.json @@ -2,27 +2,27 @@ "version": "2.0", "service": "

    The Amazon SageMaker runtime API.

    ", "operations": { - "InvokeEndpoint": "

    After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.

    For an overview of Amazon SageMaker, see How It Works.

    Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

    Cals to InvokeEndpoint are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference.

    Endpoints are scoped to an individual account, and are not public. The URL does not contain the account ID, but Amazon SageMaker determines the account ID from the authentication token that is supplied by the caller.

    " + "InvokeEndpoint": "

    After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint.

    For an overview of Amazon SageMaker, see How It Works.

    Amazon SageMaker strips all POST headers except those supported by the API. Amazon SageMaker might add additional headers. You should not rely on the behavior of headers outside those enumerated in the request syntax.

    Calls to InvokeEndpoint are authenticated by using AWS Signature Version 4. For information, see Authenticating Requests (AWS Signature Version 4) in the Amazon S3 API Reference.

A customer's model containers must respond to requests within 60 seconds. The model itself can have a maximum processing time of 60 seconds before responding to /invocations. If your model is going to take 50-60 seconds of processing time, the SDK socket timeout should be set to 70 seconds.

    Endpoints are scoped to an individual account, and are not public. The URL does not contain the account ID, but Amazon SageMaker determines the account ID from the authentication token that is supplied by the caller.
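As a rough illustration of the new TargetModel input added in this model update, the sketch below shows how an inference request against a multi-model endpoint might look in Go. It is a minimal sketch only: it assumes the v0.17.0 preview Request/Send calling pattern, and the endpoint name, payload, custom attribute, and model name are placeholders, not values from this changelog.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sagemakerruntime"
)

func main() {
	// Load credentials and region from the default sources.
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	svc := sagemakerruntime.New(cfg)

	// TargetModel (placeholder value) selects a model behind a multi-model
	// endpoint; CustomAttributes is forwarded to the container verbatim.
	req := svc.InvokeEndpointRequest(&sagemakerruntime.InvokeEndpointInput{
		EndpointName:     aws.String("my-endpoint"),     // placeholder
		ContentType:      aws.String("text/csv"),
		Body:             []byte("1.0,2.0,3.0"),
		CustomAttributes: aws.String("trace-id=abc123"), // placeholder
		TargetModel:      aws.String("model-a.tar.gz"),  // placeholder
	})

	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatalf("invoke endpoint: %v", err)
	}
	fmt.Printf("inference: %s\n", resp.Body)
}
```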

    " }, "shapes": { "BodyBlob": { "base": null, "refs": { - "InvokeEndpointInput$Body": "

    Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

    For information about the format of the request body, see Common Data Formats—Inference.

    ", - "InvokeEndpointOutput$Body": "

    Includes the inference provided by the model.

    For information about the format of the response body, see Common Data Formats—Inference.

    " + "InvokeEndpointInput$Body": "

    Provides input data, in the format specified in the ContentType request header. Amazon SageMaker passes all of the data in the body to the model.

    For information about the format of the request body, see Common Data Formats—Inference.

    ", + "InvokeEndpointOutput$Body": "

    Includes the inference provided by the model.

    For information about the format of the response body, see Common Data Formats—Inference.

    " } }, "CustomAttributesHeader": { "base": null, "refs": { - "InvokeEndpointInput$CustomAttributes": "

    ", - "InvokeEndpointOutput$CustomAttributes": "

    " + "InvokeEndpointInput$CustomAttributes": "

    Provides additional information about a request for an inference submitted to a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to provide an ID that you can use to track a request or to provide other metadata that a service endpoint was programmed to process. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.

    ", + "InvokeEndpointOutput$CustomAttributes": "

    Provides additional information in the response about the inference returned by a model hosted at an Amazon SageMaker endpoint. The information is an opaque value that is forwarded verbatim. You could use this value, for example, to return an ID received in the CustomAttributes header of a request or other metadata that a service endpoint was programmed to produce. The value must consist of no more than 1024 visible US-ASCII characters as specified in Section 3.3.6. Field Value Components of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the custom attribute returned, the model must set the custom attribute to be included on the way back.

    This feature is currently supported in the AWS SDKs but not in the Amazon SageMaker Python SDK.

    " } }, "EndpointName": { "base": null, "refs": { - "InvokeEndpointInput$EndpointName": "

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    " + "InvokeEndpointInput$EndpointName": "

    The name of the endpoint that you specified when you created the endpoint using the CreateEndpoint API.

    " } }, "Header": { @@ -66,7 +66,7 @@ } }, "ModelError": { - "base": "

    Model (owned by the customer in the container) returned an error 500.

    ", + "base": "

Model (owned by the customer in the container) returned a 4xx or 5xx error code.

    ", "refs": { } }, @@ -81,6 +81,12 @@ "ModelError$OriginalStatusCode": "

    Original status code.

    " } }, + "TargetModelHeader": { + "base": null, + "refs": { + "InvokeEndpointInput$TargetModel": "

    Specifies the model to be requested for an inference when invoking a multi-model endpoint.

    " + } + }, "ValidationError": { "base": "

    Inspect your request and try again.

    ", "refs": { diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index 46a665a0992..3699bab6cc3 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -2270,7 +2270,9 @@ "Account":{"shape":"AccountId"}, "StorageClass":{"shape":"StorageClass"}, "AccessControlTranslation":{"shape":"AccessControlTranslation"}, - "EncryptionConfiguration":{"shape":"EncryptionConfiguration"} + "EncryptionConfiguration":{"shape":"EncryptionConfiguration"}, + "ReplicationTime":{"shape":"ReplicationTime"}, + "Metrics":{"shape":"Metrics"} } }, "DisplayName":{"type":"string"}, @@ -2336,8 +2338,14 @@ "s3:ObjectRemoved:*", "s3:ObjectRemoved:Delete", "s3:ObjectRemoved:DeleteMarkerCreated", + "s3:ObjectRestore:*", "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed" + "s3:ObjectRestore:Completed", + "s3:Replication:*", + "s3:Replication:OperationFailedReplication", + "s3:Replication:OperationNotTracked", + "s3:Replication:OperationMissedThreshold", + "s3:Replication:OperationReplicatedAfterThreshold" ] }, "EventList":{ @@ -2345,6 +2353,20 @@ "member":{"shape":"Event"}, "flattened":true }, + "ExistingObjectReplication":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{"shape":"ExistingObjectReplicationStatus"} + } + }, + "ExistingObjectReplicationStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, "Expiration":{"type":"string"}, "ExpirationStatus":{ "type":"string", @@ -4271,6 +4293,17 @@ }, "MetadataKey":{"type":"string"}, "MetadataValue":{"type":"string"}, + "Metrics":{ + "type":"structure", + "required":[ + "Status", + "EventThreshold" + ], + "members":{ + "Status":{"shape":"MetricsStatus"}, + "EventThreshold":{"shape":"ReplicationTimeValue"} + } + }, "MetricsAndOperator":{ "type":"structure", "members":{ @@ -4304,6 +4337,14 @@ } }, "MetricsId":{"type":"string"}, + "MetricsStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "Minutes":{"type":"integer"}, "MissingMeta":{"type":"integer"}, "MultipartUpload":{ "type":"structure", @@ -5759,6 +5800,7 @@ "Filter":{"shape":"ReplicationRuleFilter"}, "Status":{"shape":"ReplicationRuleStatus"}, "SourceSelectionCriteria":{"shape":"SourceSelectionCriteria"}, + "ExistingObjectReplication":{"shape":"ExistingObjectReplication"}, "Destination":{"shape":"Destination"}, "DeleteMarkerReplication":{"shape":"DeleteMarkerReplication"} } @@ -5803,6 +5845,30 @@ "REPLICA" ] }, + "ReplicationTime":{ + "type":"structure", + "required":[ + "Status", + "Time" + ], + "members":{ + "Status":{"shape":"ReplicationTimeStatus"}, + "Time":{"shape":"ReplicationTimeValue"} + } + }, + "ReplicationTimeStatus":{ + "type":"string", + "enum":[ + "Enabled", + "Disabled" + ] + }, + "ReplicationTimeValue":{ + "type":"structure", + "members":{ + "Minutes":{"shape":"Minutes"} + } + }, "RequestCharged":{ "type":"string", "enum":["requester"] diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index c9408b86ad7..fcfb5e4a754 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -2,108 +2,108 @@ "version": "2.0", "service": "

    ", "operations": { - "AbortMultipartUpload": "

    Aborts a multipart upload.

    To verify that all parts have been removed, so you don't get charged for the part storage, you should call the List Parts operation and ensure the parts list is empty.

    ", - "CompleteMultipartUpload": "

    Completes a multipart upload by assembling previously uploaded parts.

    ", - "CopyObject": "

    Creates a copy of an object that is already stored in Amazon S3.

    ", - "CreateBucket": "

    Creates a new bucket.

    ", - "CreateMultipartUpload": "

    Initiates a multipart upload and returns an upload ID.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    ", - "DeleteBucket": "

    Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.

    ", - "DeleteBucketAnalyticsConfiguration": "

    Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others.

    ", - "DeleteBucketCors": "

    Deletes the CORS configuration information set for the bucket.

    ", - "DeleteBucketEncryption": "

    Deletes the server-side encryption configuration from the bucket.

    ", - "DeleteBucketInventoryConfiguration": "

    Deletes an inventory configuration (identified by the inventory ID) from the bucket.

    ", - "DeleteBucketLifecycle": "

    Deletes the lifecycle configuration from the bucket.

    ", - "DeleteBucketMetricsConfiguration": "

    Deletes a metrics configuration (specified by the metrics configuration ID) from the bucket.

    ", - "DeleteBucketPolicy": "

    Deletes the policy from the bucket.

    ", - "DeleteBucketReplication": "

    Deletes the replication configuration from the bucket. For information about replication configuration, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    ", - "DeleteBucketTagging": "

    Deletes the tags from the bucket.

    ", - "DeleteBucketWebsite": "

    This operation removes the website configuration from the bucket.

    ", - "DeleteObject": "

    Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

    ", - "DeleteObjectTagging": "

    Removes the tag-set from an existing object.

    ", - "DeleteObjects": "

    This operation enables you to delete multiple objects from a bucket using a single HTTP request. You may specify up to 1000 keys.

    ", - "DeletePublicAccessBlock": "

    Removes the PublicAccessBlock configuration from an Amazon S3 bucket.

    ", - "GetBucketAccelerateConfiguration": "

    Returns the accelerate configuration of a bucket.

    ", - "GetBucketAcl": "

    Gets the access control policy for the bucket.

    ", - "GetBucketAnalyticsConfiguration": "

    Gets an analytics configuration for the bucket (specified by the analytics configuration ID).

    ", - "GetBucketCors": "

    Returns the CORS configuration for the bucket.

    ", - "GetBucketEncryption": "

    Returns the server-side encryption configuration of a bucket.

    ", - "GetBucketInventoryConfiguration": "

    Returns an inventory configuration (identified by the inventory ID) from the bucket.

    ", - "GetBucketLifecycle": "

    No longer used, see the GetBucketLifecycleConfiguration operation.

    ", - "GetBucketLifecycleConfiguration": "

    Returns the lifecycle configuration information set on the bucket.

    ", - "GetBucketLocation": "

    Returns the region the bucket resides in.

    ", - "GetBucketLogging": "

    Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.

    ", - "GetBucketMetricsConfiguration": "

    Gets a metrics configuration (specified by the metrics configuration ID) from the bucket.

    ", - "GetBucketNotification": "

    No longer used, see the GetBucketNotificationConfiguration operation.

    ", - "GetBucketNotificationConfiguration": "

    Returns the notification configuration of a bucket.

    ", - "GetBucketPolicy": "

    Returns the policy of a specified bucket.

    ", - "GetBucketPolicyStatus": "

    Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public.

    ", - "GetBucketReplication": "

    Returns the replication configuration of a bucket.

    It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.

    ", - "GetBucketRequestPayment": "

    Returns the request payment configuration of a bucket.

    ", - "GetBucketTagging": "

    Returns the tag set associated with the bucket.

    ", - "GetBucketVersioning": "

    Returns the versioning state of a bucket.

    ", - "GetBucketWebsite": "

    Returns the website configuration for a bucket.

    ", - "GetObject": "

    Retrieves objects from Amazon S3.

    ", - "GetObjectAcl": "

    Returns the access control list (ACL) of an object.

    ", - "GetObjectLegalHold": "

    Gets an object's current Legal Hold status.

    ", - "GetObjectLockConfiguration": "

    Gets the object lock configuration for a bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.

    ", - "GetObjectRetention": "

    Retrieves an object's retention settings.

    ", - "GetObjectTagging": "

    Returns the tag-set of an object.

    ", - "GetObjectTorrent": "

    Return torrent files from a bucket.

    ", - "GetPublicAccessBlock": "

    Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket.

    ", - "HeadBucket": "

    This operation is useful to determine if a bucket exists and you have permission to access it.

    ", - "HeadObject": "

    The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    ", - "ListBucketAnalyticsConfigurations": "

    Lists the analytics configurations for the bucket.

    ", - "ListBucketInventoryConfigurations": "

    Returns a list of inventory configurations for the bucket.

    ", - "ListBucketMetricsConfigurations": "

    Lists the metrics configurations for the bucket.

    ", + "AbortMultipartUpload": "

    This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

    To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure the parts list is empty.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to AbortMultipartUpload

    ", + "CompleteMultipartUpload": "

    Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete; this operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag value returned after that part was uploaded.

    Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends whitespace characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.

    Note that if CompleteMultipartUpload fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.

    For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

CompleteMultipartUpload has the following special errors:

    • Error code: EntityTooSmall

      • Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.

      • 400 Bad Request

    • Error code: InvalidPart

      • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

      • 400 Bad Request

    • Error code: InvalidPartOrder

      • Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.

      • 400 Bad Request

    • Error code: NoSuchUpload

      • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • 404 Not Found

The following operations are related to CompleteMultipartUpload:
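Because the completion request must carry the part numbers and ETags collected from each UploadPart response, the overall flow looks roughly like the sketch below. It is a minimal illustration only, assuming the v2 preview Request/Send pattern and value slices for list members; the bucket and key names are placeholders and error handling is abbreviated.

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)
	ctx := context.TODO()
	bucket, key := aws.String("example-bucket"), aws.String("example-key") // placeholders

	// 1. Initiate the upload and remember the upload ID.
	create, err := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
		Bucket: bucket, Key: key,
	}).Send(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload each part (every part except the last must be at least 5 MB)
	//    and record its ETag and part number for the completion request.
	part, err := svc.UploadPartRequest(&s3.UploadPartInput{
		Bucket: bucket, Key: key,
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
	}).Send(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete the upload with the parts list in ascending order.
	_, err = svc.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
		Bucket: bucket, Key: key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	}).Send(ctx)
	if err != nil {
		log.Fatal(err)
	}
}
```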

    ", + "CopyObject": "

    Creates a copy of an object that is already stored in Amazon S3.

    You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic operation using this API. However, for copying an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For conceptual information, see Copy Object Using the REST Multipart Upload API.

    When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.

    Amazon S3 Transfer Acceleration does not support cross-region copies. If you request a cross-region copy using a Transfer Acceleration endpoint, you get a 400 Bad Request error. For more information about transfer acceleration, see Transfer Acceleration.

    All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.

To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since.

    All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.

    You can use this operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes.

    The source object that you are copying can be encrypted or unencrypted. If the source object is encrypted, it can be encrypted by server-side encryption using AWS-managed encryption keys or by using a customer-provided encryption key. When copying an object, you can request that Amazon S3 encrypt the target object by using either the AWS-managed encryption keys or by using your own encryption key. You can do this regardless of the form of server-side encryption that was used to encrypt the source, or even if the source object was not encrypted. For more information about server-side encryption, see Using Server-Side Encryption.

    A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.

    If the copy is successful, you receive a response with information about the copied object.

    If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.

    Consider the following when using request headers:

    • Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:

      • x-amz-copy-source-if-match condition evaluates to true

      • x-amz-copy-source-if-unmodified-since condition evaluates to false

    • Consideration 2 – If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:

      • x-amz-copy-source-if-none-match condition evaluates to false

      • x-amz-copy-source-if-modified-since condition evaluates to true

    The copy request charge is based on the storage class and Region you specify for the destination object. For pricing information, see Amazon S3 Pricing.

    Following are other considerations when using CopyObject:

    Versioning

    By default, x-amz-copy-source identifies the current version of an object to copy. (If the current version is a delete marker, Amazon S3 behaves as if the object was deleted.) To copy a different version, use the versionId subresource.

    If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header in the response.

    If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.

    If the source object's storage class is GLACIER, then you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see .

    Access Permissions

    When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

    • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

    To encrypt the target object, you must provide the appropriate encryption-related request headers. The one you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

    • To encrypt the target object using server-side encryption with an AWS-managed encryption key, provide the following request headers, as appropriate.

      • x-amz-server-side​-encryption

      • x-amz-server-side-encryption-aws-kms-key-id

      • x-amz-server-side-encryption-context

If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed customer master key (CMK) in KMS to protect the data.

      All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

      For more information on Server-Side Encryption with CMKs stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in KMS.

    • To encrypt the target object using server-side encryption with an encryption key that you provide, use the following headers.

      • x-amz-server-side​-encryption​-customer-algorithm

      • x-amz-server-side​-encryption​-customer-key

      • x-amz-server-side​-encryption​-customer-key-MD5

    • If the source object is encrypted using server-side encryption with customer-provided encryption keys, you must use the following headers.

      • x-amz-copy-source​-server-side​-encryption​-customer-algorithm

      • x-amz-copy-source​-server-side​-encryption​-customer-key

      • x-amz-copy-source-​server-side​-encryption​-customer-key-MD5

      For more information on Server-Side Encryption with CMKs stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon KMS.

    Access-Control-List (ACL)-Specific Request Headers

    You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

    • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

      • x-amz-grant-read

      • x-amz-grant-write

      • x-amz-grant-read-acp

      • x-amz-grant-write-acp

      • x-amz-grant-full-control

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

The following operations are related to CopyObject

    For more information, see Copying Objects.
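The conditional copy headers and the StorageClass parameter described above translate to input fields on the generated client. The sketch below is an illustration only, assuming the v2 preview Request/Send pattern; the bucket, key, and ETag values are placeholders, and the storage class is written as a raw string conversion rather than a guessed constant name.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Copy only if the source ETag still matches (x-amz-copy-source-if-match)
	// and change the storage class of the copy. All names are placeholders.
	req := svc.CopyObjectRequest(&s3.CopyObjectInput{
		Bucket:            aws.String("destination-bucket"),
		Key:               aws.String("destination-key"),
		CopySource:        aws.String("source-bucket/source-key"),
		CopySourceIfMatch: aws.String("\"placeholder-etag\""),
		StorageClass:      s3.StorageClass("STANDARD_IA"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```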

    ", + "CreateBucket": "

    Creates a new bucket. To create a bucket, you must register with Amazon S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

    Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.

    By default, the bucket is created in the US East (N. Virginia) region. You can optionally specify a region in the request body. You might choose a region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the EU (Ireland) region. For more information, see How to Select a Region for Your Buckets.

If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the region, even if the location constraint in the request specifies another region where the bucket is to be created. If you create a bucket in a region other than the US East (N. Virginia) region, your application must be able to handle 307 redirects. For more information, see Virtual Hosting of Buckets.

    When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.

    • Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    The following operations are related to CreateBucket:
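To show how the optional region selection described above might look in Go, here is a minimal sketch that sends a location constraint in the request body. It assumes the v2 preview Request/Send pattern; the bucket name is a placeholder and the constraint is written as a raw string conversion rather than a guessed constant name.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Create the bucket outside us-east-1 by supplying a location constraint
	// in the request body. The bucket name is a placeholder.
	req := svc.CreateBucketRequest(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: s3.BucketLocationConstraint("eu-west-1"),
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```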

    ", + "CreateMultipartUpload": "

    This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.

    For more information about multipart uploads, see Multipart Upload Overview.

    If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload.

    To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, and kms:DescribeKey actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.

    If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.

    For more information, see Protecting Data Using Server-Side Encryption.

    Access Permissions

    When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

    • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

Server-Side-Encryption-Specific Request Headers

    You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

    • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in Amazon Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

      • x-amz-server-side​-encryption

      • x-amz-server-side-encryption-aws-kms-key-id

      • x-amz-server-side-encryption-context

If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

      All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

      For more information on Server-Side Encryption with CMKs Stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

      • x-amz-server-side​-encryption​-customer-algorithm

      • x-amz-server-side​-encryption​-customer-key

      • x-amz-server-side​-encryption​-customer-key-MD5

      For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    Access-Control-List (ACL)-Specific Request Headers

    You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

    • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

      • x-amz-grant-read

      • x-amz-grant-write

      • x-amz-grant-read-acp

      • x-amz-grant-write-acp

      • x-amz-grant-full-control

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    The following operations are related to CreateMultipartUpload:
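To illustrate the SSE-KMS request headers discussed above, the sketch below initiates a multipart upload with x-amz-server-side-encryption set to aws:kms and a KMS key ID. It is a hedged sketch assuming the v2 preview Request/Send pattern; the bucket, key, and key ARN are placeholders, and the encryption value is written as a raw string conversion.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Ask Amazon S3 to encrypt the uploaded parts with an AWS KMS CMK
	// (x-amz-server-side-encryption plus the KMS key ID header).
	// Bucket, key, and key ARN are placeholders.
	req := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-key"),
		ServerSideEncryption: s3.ServerSideEncryption("aws:kms"),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.UploadId != nil {
		fmt.Println("upload ID:", *resp.UploadId)
	}
}
```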

    ", + "DeleteBucket": "

    Deletes the bucket. All objects (including all object versions and Delete Markers) in the bucket must be deleted before the bucket itself can be deleted.

    Related Resources

    ", + "DeleteBucketAnalyticsConfiguration": "

    Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

    The following operations are related to DeleteBucketAnalyticsConfiguration:

    ", + "DeleteBucketCors": "

    Deletes the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketCORS action. The bucket owner has this permission by default and can grant this permission to others.

For more information about cors, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

    Related Resources:

    ", + "DeleteBucketEncryption": "

    This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    ", + "DeleteBucketInventoryConfiguration": "

    Deletes an inventory configuration (identified by the inventory ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

Operations related to DeleteBucketInventoryConfiguration include:

    ", + "DeleteBucketLifecycle": "

    Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration.

    To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the bucket owner can grant this permission to others.

    There is usually some time lag before lifecycle configuration deletion is fully propagated to all the Amazon S3 systems.

    For more information about the object expiration, see Elements to Describe Lifecycle Actions.

    Related actions include:

    ", + "DeleteBucketMetricsConfiguration": "

    Deletes a metrics configuration for the Amazon CloudWatch request metrics (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to DeleteBucketMetricsConfiguration

    ", + "DeleteBucketPolicy": "

This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

    The following operations are related to DeleteBucketPolicy

    ", + "DeleteBucketReplication": "

    Deletes the replication configuration from the bucket.

    To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    It can take a while for the deletion of a replication configuration to fully propagate.

    For information about replication configuration, see Replication in the Amazon S3 Developer Guide.

    The following operations are related to DeleteBucketReplication

    ", + "DeleteBucketTagging": "

    Deletes the tags from the bucket.

    To use this operation, you must have permission to perform the s3:PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    The following operations are related to DeleteBucketTagging

    ", + "DeleteBucketWebsite": "

    This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

    This DELETE operation requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

    For more information about hosting websites, see Hosting Websites on Amazon S3.

    The following operations are related to DeleteBucketWebsite

    ", + "DeleteObject": "

    Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.

To remove a specific version, you must be the bucket owner and you must use the versionId subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker, to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS.

    For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.

You can delete objects by explicitly calling the DELETE Object API or by configuring the bucket lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

    The following operation is related to DeleteObject
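A hedged sketch of the version-specific delete described above is shown below, assuming the v2 preview Request/Send pattern. The bucket, key, version ID, and MFA value are placeholders; the MFA string concatenates the device serial number and the current token, and is only required when MFA Delete is enabled on the bucket.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Permanently delete one version of an object. On an MFA Delete enabled
	// bucket, the x-amz-mfa header (serial number and token) is also required.
	// All values below are placeholders.
	req := svc.DeleteObjectRequest(&s3.DeleteObjectInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("example-key"),
		VersionId: aws.String("placeholder-version-id"),
		MFA:       aws.String("arn:aws:iam::111122223333:mfa/user 123456"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```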

    ", + "DeleteObjectTagging": "

    Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.

    To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.

    To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.

The following operations are related to DeleteObjectTagging

    ", + "DeleteObjects": "

    This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

    The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that, if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode, in which the response includes the result of deletion of each key in your request. In quiet mode, the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.

When performing this operation on an MFA Delete enabled bucket that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are attempting to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.

Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.

    The following operations are related to DeleteObjects
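The quiet mode described above is selected through the request body. Below is a minimal sketch assuming the v2 preview Request/Send pattern; the bucket and object keys are placeholders, and the SDK is assumed to compute the required Content-MD5 header for the serialized request body.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Delete several keys in one request using quiet mode, so the response
	// only lists keys whose deletion failed. Names are placeholders.
	req := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []s3.ObjectIdentifier{
				{Key: aws.String("report-2019-10.csv")},
				{Key: aws.String("report-2019-11.csv")},
			},
			Quiet: aws.Bool(true),
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("failed deletions: %d\n", len(resp.Errors))
}
```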

    ", + "DeletePublicAccessBlock": "

    Removes the PublicAccessBlock configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

    ", + "GetBucketAccelerateConfiguration": "

    This implementation of the GET operation uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

    To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no Transfer Acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

    For more information on transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    ", + "GetBucketAcl": "

    This implementation of the GET operation uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

    Related Resources

    ", + "GetBucketAnalyticsConfiguration": "

    This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    ", + "GetBucketCors": "

    Returns the cors configuration information set for the bucket.

    To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

To learn more about cors, see Enabling Cross-Origin Resource Sharing.

    The following operations are related to GetBucketCors:

    ", + "GetBucketEncryption": "

    Returns the default encryption configuration for an Amazon S3 bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.

    To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    The following operations are related to GetBucketEncryption:

    ", + "GetBucketInventoryConfiguration": "

    Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.

    To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

    The following operations are related to GetBucketInventoryConfiguration:

    ", + "GetBucketLifecycle": "

For an updated version of this API, see GetBucketLifecycleConfiguration. If you configured a bucket lifecycle using the filter element, you should see the updated version of this topic. This topic is provided for backward compatibility.

    Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

    To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    GetBucketLifecycle has the following special error:

    • Error code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    The following operations are related to GetBucketLifecycle:

    ", + "GetBucketLifecycleConfiguration": "

Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are still using the previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.

    Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.

    To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    GetBucketLifecycleConfiguration has the following special error:

    • Error code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

The following operations are related to GetBucketLifecycleConfiguration:

    ", + "GetBucketLocation": "

    Returns the region the bucket resides in. You set the bucket's region using the LocationConstraint request parameter in a CreateBucket request. For more information, see CreateBucket.

    To use this implementation of the operation, you must be the bucket owner.

    The following operations are related to GetBucketLocation:

    ", + "GetBucketLogging": "

    Returns the logging status of a bucket and the permissions users have to view and modify that status. To use GET, you must be the bucket owner.

    The following operations are related to GetBucketLogging:

    ", + "GetBucketMetricsConfiguration": "

    Gets a metrics configuration (specified by the metrics configuration ID) from the bucket. Note that this doesn't include the daily storage metrics.

    To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to GetBucketMetricsConfiguration:

    ", + "GetBucketNotification": "

    No longer used, see GetBucketNotificationConfiguration.

    ", + "GetBucketNotificationConfiguration": "

    Returns the notification configuration of a bucket.

    If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration element.

    By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification permission.

    For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.

    The following operation is related to GetBucketNotification:

    ", + "GetBucketPolicy": "

    Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

    If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    The following operation is related to GetBucketPolicy:

    ", + "GetBucketPolicyStatus": "

    Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".

    The following operations are related to GetBucketPolicyStatus:

    ", + "GetBucketReplication": "

    Returns the replication configuration of a bucket.

    It can take a while for a PUT or DELETE of a replication configuration to propagate to all Amazon S3 systems. Therefore, a GET request made soon after a PUT or DELETE can return a wrong result.

    For information about replication configuration, see Replication.

    This operation requires permissions for the s3:GetReplicationConfiguration action. For more information about permissions, see Using Bucket Policies and User Policies.

    If you include the Filter element in a replication configuration, you must also include the DeleteMarkerReplication and Priority elements. The response also returns those elements.

    GetBucketReplication has the following special error:

    • Error code: NoSuchReplicationConfiguration

      • Description: There is no replication configuration with that name.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    The following operations are related to GetBucketReplication:

    ", + "GetBucketRequestPayment": "

    Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.

    The following operations are related to GetBucketRequestPayment:

    ", + "GetBucketTagging": "

    Returns the tag set associated with the bucket.

    To use this operation, you must have permission to perform the s3:GetBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    GetBucketTagging has the following special error:

    • Error code: NoSuchTagSetError

      • Description: There is no tag set associated with the bucket.

    The following operations are related to GetBucketTagging:

    ", + "GetBucketVersioning": "

    Returns the versioning state of a bucket.

    To retrieve the versioning state of a bucket, you must be the bucket owner.

    This implementation also returns the MFA Delete status of the versioning state; if MFA Delete is enabled, the bucket owner must use an authentication device to change the versioning state of the bucket.

    The following operations are related to GetBucketVersioning:

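    A minimal Go sketch of reading the versioning and MFA Delete status, assuming the v0.x request/Send client pattern; the bucket name is a placeholder and error handling is kept short:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          // Load Region and credentials from the default configuration sources.
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // Only the bucket owner can retrieve the versioning state.
          req := svc.GetBucketVersioningRequest(&s3.GetBucketVersioningInput{
              Bucket: aws.String("examplebucket"),
          })
          resp, err := req.Send(context.TODO())
          if err != nil {
              log.Fatal(err)
          }
          fmt.Println("versioning status:", resp.Status)
          fmt.Println("MFA delete:", resp.MFADelete)
      }
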
    ", + "GetBucketWebsite": "

    Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.

    This GET operation requires the S3:GetBucketWebsite permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite permission.

    The following operations are related to GetBucketWebsite:

    ", + "GetObject": "

    Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.

    An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.

    To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.

    To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

    If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE storage classes, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

    • x-amz-server-side​-encryption​-customer-algorithm

    • x-amz-server-side​-encryption​-customer-key

    • x-amz-server-side​-encryption​-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging action), the response also returns the x-amz-tagging-count header that provides the number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

    Permissions

    You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

    Versioning

    By default, the GET operation returns the current version of an object. To return a different version, use the versionId subresource.

    If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

    For more information about versioning, see PutBucketVersioning.

    Overriding Response Header Values

    There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

    You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

    You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

    • response-content-type

    • response-content-language

    • response-expires

    • response-cache-control

    • response-content-disposition

    • response-content-encoding

    Additional Considerations about Request Headers

    If both the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then Amazon S3 returns 200 OK and the data requested.

    If both the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then Amazon S3 returns a 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    The following operations are related to GetObject:

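    As a non-authoritative sketch, a GET that overrides the returned Content-Type via the response-content-type parameter might look like the following, assuming the v0.x request/Send client pattern; the bucket and key names are placeholders:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // ResponseContentType maps to the response-content-type query parameter;
          // the SDK signs the request, so the override is honored.
          req := svc.GetObjectRequest(&s3.GetObjectInput{
              Bucket:              aws.String("examplebucket"),
              Key:                 aws.String("photos/2006/February/sample.jpg"),
              ResponseContentType: aws.String("application/octet-stream"),
          })
          resp, err := req.Send(context.TODO())
          if err != nil {
              log.Fatal(err)
          }
          defer resp.Body.Close()
          fmt.Println("Content-Type returned:", *resp.ContentType)
      }
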
    ", + "GetObjectAcl": "

    Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP access to the object.

    Versioning

    By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

    The following operations are related to GetObjectAcl:

    ", + "GetObjectLegalHold": "

    Gets an object's current Legal Hold status. For more information, see Locking Objects.

    ", + "GetObjectLockConfiguration": "

    Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.

    ", + "GetObjectRetention": "

    Retrieves an object's retention settings. For more information, see Locking Objects.

    ", + "GetObjectTagging": "

    Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.

    To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET operation returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.

    By default, the bucket owner has this permission and can grant this permission to others.

    For information about the Amazon S3 object tagging feature, see Object Tagging.

    The following operation is related to GetObjectTagging:

    ", + "GetObjectTorrent": "

    Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Amazon S3 Torrent.

    You can get torrent files only for objects that are less than 5 GB in size and that are not encrypted using server-side encryption with a customer-provided encryption key.

    To use GET, you must have READ access to the object.

    The following operation is related to GetObjectTorrent:

    ", + "GetPublicAccessBlock": "

    Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

    For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

    The following operations are related to GetPublicAccessBlock:

    ", + "HeadBucket": "

    This operation is useful for determining whether a bucket exists and whether you have permission to access it. The operation returns a 200 OK if the bucket exists and you have permission to access it. Otherwise, the operation might return responses such as 404 Not Found and 403 Forbidden.

    To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

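    A minimal Go sketch of this existence and permission check, assuming the v0.x request/Send client pattern; the bucket name is a placeholder and a 403 or 404 simply surfaces as an error:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // HEAD Bucket returns no body; a successful call means the bucket exists
          // and the caller has permission to access it.
          req := svc.HeadBucketRequest(&s3.HeadBucketInput{
              Bucket: aws.String("examplebucket"),
          })
          if _, err := req.Send(context.TODO()); err != nil {
              fmt.Println("bucket is missing or not accessible:", err)
              return
          }
          fmt.Println("bucket exists and is accessible")
      }
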
    ", + "HeadObject": "

    The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.

    A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body.

    If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

    • x-amz-server-side​-encryption​-customer-algorithm

    • x-amz-server-side​-encryption​-customer-key

    • x-amz-server-side​-encryption​-customer-key-MD5

    For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).

    Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

    Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

    Consider the following when using request headers:

    • Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

      • If-Match condition evaluates to true, and;

      • If-Unmodified-Since condition evaluates to false;

      Then Amazon S3 returns 200 OK and the data requested.

    • Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

      • If-None-Match condition evaluates to false, and;

      • If-Modified-Since condition evaluates to true;

      Then Amazon S3 returns the 304 Not Modified response code.

    For more information about conditional requests, see RFC 7232.

    Permissions

    You need the s3:GetObject permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

    • If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.

    • If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.

    The following operation is related to HeadObject:

    ", + "ListBucketAnalyticsConfigurations": "

    Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.

    The following operations are related to ListBucketAnalyticsConfigurations:

    ", + "ListBucketInventoryConfigurations": "

    Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.

    The following operations are related to ListBucketInventoryConfigurations:

    ", + "ListBucketMetricsConfigurations": "

    Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.

    This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.

    To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to ListBucketMetricsConfigurations:

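    A sketch of the continuation-token pagination described above, assuming the v0.x request/Send client pattern and the generated field names; the bucket name is a placeholder:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          params := &s3.ListBucketMetricsConfigurationsInput{
              Bucket: aws.String("examplebucket"),
          }
          for {
              req := svc.ListBucketMetricsConfigurationsRequest(params)
              resp, err := req.Send(context.TODO())
              if err != nil {
                  log.Fatal(err)
              }
              for _, mc := range resp.MetricsConfigurationList {
                  fmt.Println("metrics configuration:", *mc.Id)
              }
              // IsTruncated signals more pages; NextContinuationToken is passed back
              // as continuation-token on the next request.
              if resp.IsTruncated == nil || !*resp.IsTruncated {
                  break
              }
              params.ContinuationToken = resp.NextContinuationToken
          }
      }
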
    ", "ListBuckets": "

    Returns a list of all buckets owned by the authenticated sender of the request.

    ", - "ListMultipartUploads": "

    This operation lists in-progress multipart uploads.

    ", - "ListObjectVersions": "

    Returns metadata about all of the versions of objects in a bucket.

    ", - "ListObjects": "

    Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket.

    ", - "ListObjectsV2": "

    Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend you use this revised API for new application development.

    ", - "ListParts": "

    Lists the parts that have been uploaded for a specific multipart upload.

    ", - "PutBucketAccelerateConfiguration": "

    Sets the accelerate configuration of an existing bucket.

    ", - "PutBucketAcl": "

    Sets the permissions on a bucket using access control lists (ACL).

    ", - "PutBucketAnalyticsConfiguration": "

    Sets an analytics configuration for the bucket (specified by the analytics configuration ID).

    ", - "PutBucketCors": "

    Sets the CORS configuration for a bucket.

    ", - "PutBucketEncryption": "

    Creates a new server-side encryption configuration (or replaces an existing one, if present).

    ", - "PutBucketInventoryConfiguration": "

    Adds an inventory configuration (identified by the inventory ID) from the bucket.

    ", - "PutBucketLifecycle": "

    No longer used, see the PutBucketLifecycleConfiguration operation.

    ", - "PutBucketLifecycleConfiguration": "

    Sets lifecycle configuration for your bucket. If a lifecycle configuration exists, it replaces it.

    ", - "PutBucketLogging": "

    Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. To set the logging status of a bucket, you must be the bucket owner.

    ", - "PutBucketMetricsConfiguration": "

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.

    ", - "PutBucketNotification": "

    No longer used, see the PutBucketNotificationConfiguration operation.

    ", - "PutBucketNotificationConfiguration": "

    Enables notifications of specified events for a bucket.

    ", - "PutBucketPolicy": "

    Applies an Amazon S3 bucket policy to an Amazon S3 bucket.

    ", - "PutBucketReplication": "

    Creates a replication configuration or replaces an existing one. For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    ", - "PutBucketRequestPayment": "

    Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. Documentation on requester pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html

    ", - "PutBucketTagging": "

    Sets the tags for a bucket.

    ", - "PutBucketVersioning": "

    Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

    ", - "PutBucketWebsite": "

    Set the website configuration for a bucket.

    ", - "PutObject": "

    Adds an object to a bucket.

    ", - "PutObjectAcl": "

    uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket

    ", - "PutObjectLegalHold": "

    Applies a Legal Hold configuration to the specified object.

    ", - "PutObjectLockConfiguration": "

    Places an object lock configuration on the specified bucket. The rule specified in the object lock configuration will be applied by default to every new object placed in the specified bucket.

    ", - "PutObjectRetention": "

    Places an Object Retention configuration on an object.

    ", - "PutObjectTagging": "

    Sets the supplied tag-set to an object that already exists in a bucket

    ", - "PutPublicAccessBlock": "

    Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket.

    ", - "RestoreObject": "

    Restores an archived copy of an object back into Amazon S3

    ", - "SelectObjectContent": "

    This operation filters the contents of an Amazon S3 object based on a simple Structured Query Language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON or CSV) of the object. Amazon S3 uses this to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    ", - "UploadPart": "

    Uploads a part in a multipart upload.

    Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

    ", - "UploadPartCopy": "

    Uploads a part by copying data from an existing object as data source.

    " + "ListMultipartUploads": "

    This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.

    This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.

    In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.

    For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to ListMultipartUploads:

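    A sketch of paging through in-progress uploads with the key-marker and upload-id-marker parameters, assuming the v0.x request/Send client pattern; the bucket name is a placeholder:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          params := &s3.ListMultipartUploadsInput{
              Bucket:     aws.String("examplebucket"),
              MaxUploads: aws.Int64(100),
          }
          for {
              req := svc.ListMultipartUploadsRequest(params)
              resp, err := req.Send(context.TODO())
              if err != nil {
                  log.Fatal(err)
              }
              for _, u := range resp.Uploads {
                  fmt.Println("in-progress upload:", *u.Key, *u.UploadId)
              }
              // When the listing is truncated, the next page starts after the
              // returned key and upload ID markers.
              if resp.IsTruncated == nil || !*resp.IsTruncated {
                  break
              }
              params.KeyMarker = resp.NextKeyMarker
              params.UploadIdMarker = resp.NextUploadIdMarker
          }
      }
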
    ", + "ListObjectVersions": "

    Returns metadata about all of the versions of objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

    A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

    To use this operation, you must have READ access to the bucket.

    The following operations are related to ListObjectVersions:

    ", + "ListObjects": "

    Returns some or all (up to 1000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

    This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

    The following operations are related to ListObjects:

    ", + "ListObjectsV2": "

    Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

    To use this operation, you must have READ access to the bucket.

    To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.

    To get a list of your buckets, see ListBuckets.

    The following operations are related to ListObjectsV2:

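    A sketch of listing a bucket with continuation-token pagination, assuming the v0.x request/Send client pattern; the bucket name and prefix are placeholders:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          params := &s3.ListObjectsV2Input{
              Bucket:  aws.String("examplebucket"),
              Prefix:  aws.String("photos/"),
              MaxKeys: aws.Int64(100),
          }
          for {
              req := svc.ListObjectsV2Request(params)
              resp, err := req.Send(context.TODO())
              if err != nil {
                  log.Fatal(err)
              }
              for _, obj := range resp.Contents {
                  fmt.Println(*obj.Key)
              }
              if resp.IsTruncated == nil || !*resp.IsTruncated {
                  break
              }
              // NextContinuationToken becomes continuation-token on the next page.
              params.ContinuationToken = resp.NextContinuationToken
          }
      }
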
    ", + "ListParts": "

    Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

    For more information on multipart uploads, see Uploading Objects Using Multipart Upload.

    For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.

    The following operations are related to ListParts:

    ", + "PutBucketAccelerateConfiguration": "

    Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.

    To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    The Transfer Acceleration state of a bucket can be set to one of the following two values:

    • Enabled – Enables accelerated data transfers to the bucket.

    • Suspended – Disables accelerated data transfers to the bucket.

    The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.

    After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.

    The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").

    For more information about transfer acceleration, see Transfer Acceleration.

    The following operations are related to PutBucketAccelerateConfiguration:

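    As a rough sketch only, enabling Transfer Acceleration might look like the following, assuming the v0.x request/Send client pattern; the bucket name is a placeholder and the enum constant name assumes the SDK's generated naming:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          req := svc.PutBucketAccelerateConfigurationRequest(&s3.PutBucketAccelerateConfigurationInput{
              Bucket: aws.String("examplebucket"),
              AccelerateConfiguration: &s3.AccelerateConfiguration{
                  // Status accepts Enabled or Suspended; the constant name is assumed here.
                  Status: s3.BucketAccelerateStatusEnabled,
              },
          })
          if _, err := req.Send(context.TODO()); err != nil {
              log.Fatal(err)
          }
          fmt.Println("transfer acceleration enabled")
      }
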
    ", + "PutBucketAcl": "

    Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.

    You can use one of the following two ways to set a bucket's permissions:

    • Specify the ACL in the request body

    • Specify permissions using request headers

    You cannot specify access permission using both the body and the request headers.

    Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

    Access Permissions

    You can set access permissions using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and two AWS accounts identified by their email addresses.

      x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\", emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    Related Resources

    ", + "PutBucketAnalyticsConfiguration": "

    Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.

    You can choose to have storage class analysis export analysis reports to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.

    You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

    To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    Special Errors

      • HTTP Error: HTTP 400 Bad Request

      • Code: InvalidArgument

      • Cause: Invalid argument.

      • HTTP Error: HTTP 400 Bad Request

      • Code: TooManyConfigurations

      • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Error: HTTP 403 Forbidden

      • Code: AccessDenied

      • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.

    Related Resources

    ", + "PutBucketCors": "

    Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.

    To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.

    You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.

    To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.

    When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:

    • The request's Origin header must match AllowedOrigin elements.

    • The request method (for example, GET, PUT, HEAD and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.

    • Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.

    For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

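    A sketch of a single CORS rule matching the scenario above (browser requests whose origin is http://www.example.com), assuming the v0.x request/Send client pattern; the bucket name is a placeholder:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          req := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{
              Bucket: aws.String("examplebucket"),
              CORSConfiguration: &s3.CORSConfiguration{
                  CORSRules: []s3.CORSRule{
                      {
                          // Allow browser GETs and PUTs whose Origin is http://www.example.com.
                          AllowedOrigins: []string{"http://www.example.com"},
                          AllowedMethods: []string{"GET", "PUT"},
                          AllowedHeaders: []string{"*"},
                          MaxAgeSeconds:  aws.Int64(3000),
                      },
                  },
              },
          })
          if _, err := req.Send(context.TODO()); err != nil {
              log.Fatal(err)
          }
          fmt.Println("CORS configuration applied")
      }
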
    Related Resources

    ", + "PutBucketEncryption": "

    This implementation of the PUT operation uses the encryption subresource to set the default encryption state of an existing bucket.

    This implementation of the PUT operation sets default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).

    To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Related Resources

    ", + "PutBucketInventoryConfiguration": "

    This implementation of the PUT operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.

    Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.

    When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.

    You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.

    To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Special Errors

    • HTTP 400 Bad Request Error

      • Code: InvalidArgument

      • Cause: Invalid Argument

    • HTTP 400 Bad Request Error

      • Code: TooManyConfigurations

      • Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

    • HTTP 403 Forbidden Error

      • Code: AccessDenied

      • Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket

    Related Resources

    ", + "PutBucketLifecycle": "

    For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

    By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.

    Related Resources

    ", + "PutBucketLifecycleConfiguration": "

    Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management.

    Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.

    Rules

    You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:

    • Filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.

    • Status indicating whether the rule is in effect.

    • One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.

    For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.

    Permissions

    By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the AWS account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.

    You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:

    • s3:DeleteObject

    • s3:DeleteObjectVersion

    • s3:PutLifecycleConfiguration

    For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.

    The following are related to PutBucketLifecycleConfiguration:

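    A sketch of a single filter-based rule (a key name prefix filter plus an expiration action), assuming the v0.x request/Send client pattern; the bucket name, rule ID, prefix, and enum constant name are assumptions:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          req := svc.PutBucketLifecycleConfigurationRequest(&s3.PutBucketLifecycleConfigurationInput{
              Bucket: aws.String("examplebucket"),
              LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
                  Rules: []s3.LifecycleRule{
                      {
                          ID: aws.String("expire-old-logs"),
                          // The rule applies only to keys under the logs/ prefix.
                          Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
                          // Constant name assumes the SDK's generated enums.
                          Status: s3.ExpirationStatusEnabled,
                          // Expire matching current versions 365 days after creation.
                          Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
                      },
                  },
              },
          })
          if _, err := req.Send(context.TODO()); err != nil {
              log.Fatal(err)
          }
          fmt.Println("lifecycle configuration replaced")
      }
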
    ", + "PutBucketLogging": "

    Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same AWS Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.

    The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>

      DisplayName is optional and ignored in the request.

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>

    To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:

    <BucketLoggingStatus xmlns=\"http://doc.s3.amazonaws.com/2006-03-01\" />

    For more information about server access logging, see Server Access Logging.

    For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.

    The following operations are related to PutBucketLogging:

    ", + "PutBucketMetricsConfiguration": "

    Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.

    To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.

    The following operations are related to PutBucketMetricsConfiguration:

    PutBucketMetricsConfiguration has the following special error:

    • Error code: TooManyConfigurations

      • Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

      • HTTP Status Code: HTTP 400 Bad Request

    ", + "PutBucketNotification": "

    No longer used, see the PutBucketNotificationConfiguration operation.

    ", + "PutBucketNotificationConfiguration": "

    Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.

    Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.

    By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.

    <NotificationConfiguration>

    </NotificationConfiguration>

    This operation replaces the existing notification configuration with the configuration you include in the request body.

    After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.

    You can disable notifications by adding the empty NotificationConfiguration element.

    By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.

    The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.

    Responses

    If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.

    The following operation is related to PutBucketNotificationConfiguration:

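    A sketch of disabling notifications by sending the empty NotificationConfiguration shown above, assuming the v0.x request/Send client pattern; the bucket name is a placeholder:

      package main

      import (
          "context"
          "fmt"
          "log"

          "github.com/aws/aws-sdk-go-v2/aws"
          "github.com/aws/aws-sdk-go-v2/aws/external"
          "github.com/aws/aws-sdk-go-v2/service/s3"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // An empty NotificationConfiguration removes all event notifications from the bucket.
          req := svc.PutBucketNotificationConfigurationRequest(&s3.PutBucketNotificationConfigurationInput{
              Bucket:                    aws.String("examplebucket"),
              NotificationConfiguration: &s3.NotificationConfiguration{},
          })
          if _, err := req.Send(context.TODO()); err != nil {
              log.Fatal(err)
          }
          fmt.Println("notifications disabled")
      }
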
    ", + "PutBucketPolicy": "

    Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.

    If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

    As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.

    For more information about bucket policies, see Using Bucket Policies and User Policies.

    The following operations are related to PutBucketPolicy:

    ", + "PutBucketReplication": "

    Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.

    To perform this operation, the user or role performing the operation must have the iam:PassRole permission.

    Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.

    A replication configuration must include at least one rule, and can contain a maximum of 1,000 rules. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset. All rules must specify the same destination bucket.

    To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.

    For information about enabling versioning on a bucket, see Using Versioning.

    By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.

    Handling Replication of Encrypted Objects

    By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.

    PutBucketReplication has the following special errors:

    • Error code: InvalidRequest

      • Description: If the <Owner> in <AccessControlTranslation> has a value, the <Account> element must be specified.

      • HTTP 400

    • Error code: InvalidArgument

      • Description: The <Account> element is empty. It must contain a valid account ID.

      • HTTP 400

    • Error code: InvalidArgument

      • Description: The AWS account specified in the <Account> element must match the destination bucket owner.

      • HTTP 400

    The following operations are related to PutBucketReplication:

    ", + "PutBucketRequestPayment": "

    Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.

    The following operations are related to PutBucketRequestPayment:

    ", + "PutBucketTagging": "

    Sets the tags for a bucket.

    Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.

    Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.

    To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

    PutBucketTagging has the following special errors:

    • Error code: InvalidTagError

    • Error code: MalformedXMLError

      • Description: The XML provided does not match the schema.

    • Error code: OperationAbortedError

      • Description: A conflicting conditional operation is currently in progress against this resource. Please try again.

    • Error code: InternalError

      • Description: The service was unable to apply the provided tag to the bucket.

    The following operations are related to PutBucketTagging:

    ", + "PutBucketVersioning": "

    Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.

    You can set the versioning state with one of the following values:

    Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.

    Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
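
    A minimal sketch of enabling versioning with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket name is a placeholder and the Go enum type name is an assumption.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          req := svc.PutBucketVersioningRequest(&s3.PutBucketVersioningInput{
              Bucket: aws.String(\"my-bucket\"), // placeholder
              VersioningConfiguration: &s3.VersioningConfiguration{
                  // Enabled and Suspended are the documented values; the Go type
                  // name BucketVersioningStatus is an assumption.
                  Status: s3.BucketVersioningStatus(\"Enabled\"),
              },
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }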

    If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.

    If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.

    If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.

    Related Resources

    ", + "PutBucketWebsite": "

    Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.

    This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.

    To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket; a minimal sketch follows the list of elements below.

    • WebsiteConfiguration

    • RedirectAllRequestsTo

    • HostName

    • Protocol
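
    A hedged sketch of this redirect-all case with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket and host names are placeholders, and the Go field names mirror the elements listed above.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // Redirect every request for my-bucket's website endpoint to example.com.
          req := svc.PutBucketWebsiteRequest(&s3.PutBucketWebsiteInput{
              Bucket: aws.String(\"my-bucket\"),
              WebsiteConfiguration: &s3.WebsiteConfiguration{
                  RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
                      HostName: aws.String(\"example.com\"),
                  },
              },
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }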

    If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.

    • WebsiteConfiguration

    • IndexDocument

    • Suffix

    • ErrorDocument

    • Key

    • RoutingRules

    • RoutingRule

    • Condition

    • HttpErrorCodeReturnedEquals

    • KeyPrefixEquals

    • Redirect

    • Protocol

    • HostName

    • ReplaceKeyPrefixWith

    • ReplaceKeyWith

    • HttpRedirectCode

    ", + "PutObject": "

    Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.

    Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.

    Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.

    To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
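
    For example, the Content-MD5 value is the base64 encoding of the 128-bit MD5 digest of the payload. A small standard-library Go sketch that computes it (the payload is a placeholder; no SDK calls are assumed):

      package main

      import (
          \"crypto/md5\"
          \"encoding/base64\"
          \"fmt\"
      )

      func main() {
          body := []byte(\"example object payload\") // placeholder payload

          // 16-byte MD5 digest of the request body.
          sum := md5.Sum(body)

          // Base64-encode the raw digest; this string is the Content-MD5 header value.
          contentMD5 := base64.StdEncoding.EncodeToString(sum[:])
          fmt.Println(\"Content-MD5:\", contentMD5)
      }

    Supplying this value with the PUT request lets Amazon S3 verify that the payload it received matches what you sent.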

    To configure your application to send the request headers before sending the request body, use the 100-continue HTTP status code. For PUT operations, this helps you avoid sending the message body if the message is rejected based on the headers (for example, because authentication fails or a redirect occurs). For more information on the 100-continue HTTP status code, see Section 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt.

    You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS-managed encryption keys. For more information, see Using Server-Side Encryption.

    Access Permissions

    You can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:

    • Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Server-Side-Encryption-Specific Request Headers

    You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

    • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

      • x-amz-server-side-encryption

      • x-amz-server-side-encryption-aws-kms-key-id

      • x-amz-server-side-encryption-context

      If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.

      All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

      For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    Access-Control-List (ACL)-Specific Request Headers

    You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the Access Control List (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:

    • Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.

    • Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly use:

      • x-amz-grant-read

      • x-amz-grant-write

      • x-amz-grant-read-acp

      • x-amz-grant-write-acp

      • x-amz-grant-full-control

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

        Using email addresses to specify a grantee is only supported in the following AWS Regions:

        • US East (N. Virginia)

        • US West (N. California)

        • US West (Oregon)

        • Asia Pacific (Singapore)

        • Asia Pacific (Sydney)

        • Asia Pacific (Tokyo)

        • EU (Ireland)

        • South America (São Paulo)

        For a list of all the Amazon S3 supported regions and endpoints, see Regions and Endpoints in the AWS General Reference

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-read header grants the AWS accounts identified by email addresses permissions to read object data and its metadata:

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    Server-Side-Encryption-Specific Request Headers

    You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS-managed encryption keys or provide your own encryption key.

    • Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.

      • x-amz-server-side-encryption

      • x-amz-server-side-encryption-aws-kms-key-id

      • x-amz-server-side-encryption-context

      If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the default AWS KMS CMK to protect the data.

      All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.

      For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    • Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.

      If you use this feature, the ETag value that Amazon S3 returns in the response is not the MD5 of the object.

      • x-amz-server-side-encryption-customer-algorithm

      • x-amz-server-side-encryption-customer-key

      • x-amz-server-side-encryption-customer-key-MD5

      For more information on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.

    Storage Class Options

    By default, Amazon S3 uses the Standard storage class to store newly created objects. The Standard storage class provides high durability and high availability. You can specify other storage classes depending on the performance needs. For more information, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

    Versioning

    If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response using the x-amz-version-id response header. If versioning is suspended, Amazon S3 always uses null as the version ID for the object stored. For more information about returning the versioning state of a bucket, see GetBucketVersioning. If you enable versioning for a bucket, when Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.

    Related Resources

    ", + "PutObjectAcl": "

    Uses the acl subresource to set the access control list (ACL) permissions for an object that already exists in a bucket. You must have WRITE_ACP permission to set the ACL of an object.

    Depending on your application needs, you may choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.

    Access Permissions

    You can set access permissions using one of the following methods:

    • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control specific headers in your request. For more information, see Canned ACL.

    • Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.

      You specify each grantee as a type=value pair, where the type is one of the following:

      • emailAddress – if the value specified is the email address of an AWS account

      • id – if the value specified is the canonical user ID of an AWS account

      • uri – if you are granting permissions to a predefined group

      For example, the following x-amz-grant-read header grants list objects permission to the two AWS accounts identified by their email addresses.

      x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"

    You can use either a canned ACL or specify access permissions explicitly. You cannot do both.

    Grantee Values

    You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:

    • By Email address:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>

      The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.

    • By the person's ID:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>

      DisplayName is optional and ignored in the request.

    • By URI:

      <Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>

    Versioning

    The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.

    Related Resources

    ", + "PutObjectLegalHold": "

    Applies a Legal Hold configuration to the specified object.

    Related Resources

    ", + "PutObjectLockConfiguration": "

    Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket.

    DefaultRetention requires either Days or Years. You can't specify both at the same time.

    Related Resources

    ", + "PutObjectRetention": "

    Places an Object Retention configuration on an object.

    Related Resources

    ", + "PutObjectTagging": "

    Sets the supplied tag-set to an object that already exists in a bucket.

    A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
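
    A hedged sketch of replacing an object's tag set with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket, key, and tag values are placeholders, and the Tagging/Tag field shapes are assumptions.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // Replace the tag set on my-bucket/my-key with a single project=alpha tag.
          req := svc.PutObjectTaggingRequest(&s3.PutObjectTaggingInput{
              Bucket: aws.String(\"my-bucket\"),
              Key:    aws.String(\"my-key\"),
              Tagging: &s3.Tagging{
                  TagSet: []s3.Tag{
                      {Key: aws.String(\"project\"), Value: aws.String(\"alpha\")},
                  },
              },
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }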

    For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.

    To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.

    To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.

    For information about the Amazon S3 object tagging feature, see Object Tagging.

    Special Errors

      • Code: InvalidTagError

      • Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.

      • Code: MalformedXMLError

      • Cause: The XML provided does not match the schema.

      • Code: OperationAbortedError

      • Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.

      • Code: InternalError

      • Cause: The service was unable to apply the provided tag to the object.

    Related Resources

    ", + "PutPublicAccessBlock": "

    Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. In order to use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

    When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
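
    A minimal sketch of blocking all public access on a bucket with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket name is a placeholder and the Go field names mirror the PublicAccessBlock settings.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // Turn on all four public-access blocks for the bucket.
          req := svc.PutPublicAccessBlockRequest(&s3.PutPublicAccessBlockInput{
              Bucket: aws.String(\"my-bucket\"),
              PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
                  BlockPublicAcls:       aws.Bool(true),
                  BlockPublicPolicy:     aws.Bool(true),
                  IgnorePublicAcls:      aws.Bool(true),
                  RestrictPublicBuckets: aws.Bool(true),
              },
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }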

    For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

    Related Resources

    ", + "RestoreObject": "

    Restores an archived copy of an object back into Amazon S3.

    This operation performs the following types of requests:

    • select - Perform a select query on an archived object

    • restore an archive - Restore an archived object

    To use this operation, you must have permissions to perform the s3:RestoreObject and s3:GetObject actions. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.

    Querying Archives with Select Requests

    You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

    When making a select request, do the following:

    • Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.

      For more information about the S3 structure in the request body, see the following:

    • Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.

      • The following expression returns all records from the specified object.

        SELECT * FROM Object

      • Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.

        SELECT s._1, s._2 FROM Object s WHERE s._3 > 100

      • If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.

        SELECT s.Id, s.FirstName, s.SSN FROM S3Object s

    For more information about using SQL with Glacier Select restore, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.

    When making a select request, you can also do the following:

    • To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.

    • Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.

    The following are additional important facts about the select feature:

    • The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.

    • You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.

    • Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.

    Restoring Archives

    Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To access an archived object, you must first initiate a restore request. This restores a temporary copy of the archived object. In a restore request, you specify the number of days that you want the restored copy to exist. After the specified period, Amazon S3 deletes the temporary copy, but the object remains archived in the GLACIER or DEEP_ARCHIVE storage class from which it was restored.

    To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

    The time it takes restore jobs to finish depends on which storage class the object is being restored from and which data access tier you specify.

    When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:

    • Expedited - Expedited retrievals allow you to quickly access your data stored in the GLACIER storage class when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals are typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for the DEEP_ARCHIVE storage class.

    • Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. Standard retrievals typically complete within 3-5 hours from the GLACIER storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.

    • Bulk - Bulk retrievals are Amazon Glacier’s lowest-cost retrieval option, enabling you to retrieve large amounts, even petabytes, of data inexpensively in a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER storage class and typically complete within 48 hours from the DEEP_ARCHIVE storage class.

    For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.
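
    A hedged sketch of initiating a restore for two days at the Standard tier with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket and key are placeholders, and the RestoreRequest, GlacierJobParameters, and Tier Go names are assumptions that mirror the request elements described above.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // Keep the temporary restored copy for 2 days, retrieved at the Standard tier.
          req := svc.RestoreObjectRequest(&s3.RestoreObjectInput{
              Bucket: aws.String(\"my-bucket\"),
              Key:    aws.String(\"my-archived-key\"),
              RestoreRequest: &s3.RestoreRequest{
                  Days: aws.Int64(2),
                  GlacierJobParameters: &s3.GlacierJobParameters{
                      Tier: s3.Tier(\"Standard\"), // enum type name assumed
                  },
              },
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }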

    You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. You upgrade the speed of an in-progress restoration by issuing another restore request to the same object, setting a new Tier request element. When issuing a request to upgrade the restore tier, you must choose a tier that is faster than the tier that the in-progress restore is using. You must not change any other parameters, such as the Days request element. For more information, see Upgrading the Speed of an In-Progress Restore in the Amazon Simple Storage Service Developer Guide.

    To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.

    After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

    If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

    Responses

    A successful operation returns either the 200 OK or 202 Accepted status code.

    • If the object copy is not previously restored, then Amazon S3 returns 202 Accepted in the response.

    • If the object copy is previously restored, Amazon S3 returns 200 OK in the response.

    Special Errors

      • Code: RestoreAlreadyInProgress

      • Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: GlacierExpeditedRetrievalNotAvailable

      • Cause: Glacier expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to Standard or Bulk retrievals.)

      • HTTP Status Code: 503

      • SOAP Fault Code Prefix: N/A

    Related Resources

    ", + "SelectObjectContent": "

    This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

    For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.

    For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and Glacier Select in the Amazon Simple Storage Service Developer Guide.

    Permissions

    You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.

    Object Data Formats

    You can use Amazon S3 Select to query objects that have the following format properties:

    • CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.

    • UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.

    • GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.

    • Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.

      For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.

      For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

    Working with the Response Body

    Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see RESTSelectObjectAppendix.

    GetObject Support

    The SelectObjectContent operation does not support the following GetObject functionality. For more information, see GetObject.

    • Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest$ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.

    • GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service Developer Guide.

    Special Errors

    For a list of special errors for this operation and for general information about Amazon S3 errors and a list of error codes, see ErrorResponses

    Related Resources

    ", + "UploadPart": "

    Uploads a part in a multipart upload.

    In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

    You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.

    Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
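
    A hedged sketch of uploading a single part with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket, key, and upload ID are placeholders, and the part body here is far smaller than the 5 MB minimum purely to keep the example short.

      package main

      import (
          \"bytes\"
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          partData := []byte(\"part 1 bytes\") // placeholder part data

          // Upload part number 1 under an upload ID previously returned by CreateMultipartUpload.
          req := svc.UploadPartRequest(&s3.UploadPartInput{
              Bucket:     aws.String(\"my-bucket\"),
              Key:        aws.String(\"my-key\"),
              UploadId:   aws.String(\"EXAMPLE-UPLOAD-ID\"), // placeholder
              PartNumber: aws.Int64(1),
              Body:       bytes.NewReader(partData),
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }

    Each successful UploadPart response includes an ETag for the part; those ETags are what you later pass to CompleteMultipartUpload.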

    To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.

    Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop being charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.

    For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide.

    For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

    You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.

    Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.

    If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.

    • x-amz-server-side-encryption-customer-algorithm

    • x-amz-server-side-encryption-customer-key

    • x-amz-server-side-encryption-customer-key-MD5

    Special Errors

      • Code: NoSuchUpload

      • Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

    Related Resources

    ", + "UploadPartCopy": "

    Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.

    The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.

    Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.

    You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
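
    A hedged sketch of copying the first 5 MB of an existing object into part 2 of a multipart upload with this SDK, assuming the preview XxxRequest/Send(ctx) pattern; the bucket, key, source, and upload ID values are placeholders.

      package main

      import (
          \"context\"
          \"log\"

          \"github.com/aws/aws-sdk-go-v2/aws\"
          \"github.com/aws/aws-sdk-go-v2/aws/external\"
          \"github.com/aws/aws-sdk-go-v2/service/s3\"
      )

      func main() {
          cfg, err := external.LoadDefaultAWSConfig()
          if err != nil {
              log.Fatal(err)
          }
          svc := s3.New(cfg)

          // CopySource maps to x-amz-copy-source and CopySourceRange to x-amz-copy-source-range.
          req := svc.UploadPartCopyRequest(&s3.UploadPartCopyInput{
              Bucket:          aws.String(\"destination-bucket\"),
              Key:             aws.String(\"destination-key\"),
              UploadId:        aws.String(\"EXAMPLE-UPLOAD-ID\"), // placeholder
              PartNumber:      aws.Int64(2),
              CopySource:      aws.String(\"source-bucket/source-key\"),
              CopySourceRange: aws.String(\"bytes=0-5242879\"), // first 5 MB
          })
          if _, err := req.Send(context.Background()); err != nil {
              log.Fatal(err)
          }
      }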

    For more information on using the UploadPartCopy operation, see the following topics:

    • For conceptual information on multipart uploads, go to Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service Developer Guide.

    • For information on permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.

    • For information about copying objects using a single atomic operation vs. the multipart upload, go to Operations on Objects in the Amazon Simple Storage Service Developer Guide.

    • For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.

    Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:

    • Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request as follows:

      x-amz-copy-source-if-match condition evaluates to true, and

      x-amz-copy-source-if-unmodified-since condition evaluates to false,

      then Amazon S3 returns 200 OK and copies the data.

    • Consideration 2 - If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request as follows:

      x-amz-copy-source-if-none-match condition evaluates to false, and

      x-amz-copy-source-if-modified-since condition evaluates to true,

      then Amazon S3 returns the 412 Precondition Failed response code.

    Versioning

    If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.

    You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:

    x-amz-copy-source: /bucket/object?versionId=version id

    Special Errors

      • Code: NoSuchUpload

      • Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • Code: InvalidRequest

      • Cause: The specified copy source is not supported as a byte-range copy source.

      • HTTP Status Code: 400 Bad Request

    Related Resources

    " }, "shapes": { "AbortDate": { "base": null, "refs": { - "CreateMultipartUploadOutput$AbortDate": "

    Date when multipart upload will become eligible for abort operation by lifecycle.

    ", - "ListPartsOutput$AbortDate": "

    Date when multipart upload will become eligible for abort operation by lifecycle.

    " + "CreateMultipartUploadOutput$AbortDate": "

    If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, the response includes this header. The header indicates when the initiated multipart upload becomes eligible for an abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    The response also includes the x-amz-abort-rule-id header that provides the ID of the lifecycle configuration rule that defines this action.

    ", + "ListPartsOutput$AbortDate": "

    If the bucket has a lifecycle rule configured with an action to abort incomplete multipart uploads and the prefix in the lifecycle rule matches the object name in the request, then the response includes this header indicating when the initiated multipart upload will become eligible for an abort operation. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

    The response will also include the x-amz-abort-rule-id header that will provide the ID of the lifecycle configuration rule that defines this action.

    " } }, "AbortIncompleteMultipartUpload": { "base": "

    Specifies the days since the initiation of an incomplete multipart upload that Amazon S3 will wait before permanently removing all parts of the upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { - "LifecycleRule$AbortIncompleteMultipartUpload": "

    ", - "Rule$AbortIncompleteMultipartUpload": "

    " + "LifecycleRule$AbortIncompleteMultipartUpload": null, + "Rule$AbortIncompleteMultipartUpload": null } }, "AbortMultipartUploadOutput": { @@ -119,21 +119,21 @@ "AbortRuleId": { "base": null, "refs": { - "CreateMultipartUploadOutput$AbortRuleId": "

    Id of the lifecycle rule that makes a multipart upload eligible for abort operation.

    ", - "ListPartsOutput$AbortRuleId": "

    Id of the lifecycle rule that makes a multipart upload eligible for abort operation.

    " + "CreateMultipartUploadOutput$AbortRuleId": "

    This header is returned along with the x-amz-abort-date header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.

    ", + "ListPartsOutput$AbortRuleId": "

    This header is returned along with the x-amz-abort-date header. It identifies the applicable lifecycle configuration rule that defines the action to abort incomplete multipart uploads.

    " } }, "AccelerateConfiguration": { "base": "

    Configures the transfer acceleration state for an Amazon S3 bucket. For more information, see Amazon S3 Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { - "PutBucketAccelerateConfigurationRequest$AccelerateConfiguration": "

    Specifies the Accelerate Configuration you want to set for the bucket.

    " + "PutBucketAccelerateConfigurationRequest$AccelerateConfiguration": "

    Container for setting the transfer acceleration state.

    " } }, "AcceptRanges": { "base": null, "refs": { - "GetObjectOutput$AcceptRanges": "

    ", - "HeadObjectOutput$AcceptRanges": "

    " + "GetObjectOutput$AcceptRanges": "

    Indicates that a range of bytes was specified.

    ", + "HeadObjectOutput$AcceptRanges": "

    Indicates that a range of bytes was specified.

    " } }, "AccessControlPolicy": { @@ -153,7 +153,7 @@ "base": null, "refs": { "AnalyticsS3BucketDestination$BucketAccountId": "

    The account ID that owns the destination bucket. If no account ID is provided, the owner will not be validated prior to exporting data.

    ", - "Destination$Account": "

    Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation property, this is the account ID of the destination bucket owner. For more information, see Cross-Region Replication Additional Configuration: Change Replica Owner in the Amazon Simple Storage Service Developer Guide.

    ", + "Destination$Account": "

    Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the AWS account that owns the destination bucket by specifying the AccessControlTranslation property, this is the account ID of the destination bucket owner. For more information, see Replication Additional Configuration: Change Replica Owner in the Amazon Simple Storage Service Developer Guide.

    ", "InventoryS3BucketDestination$AccountId": "

    The ID of the account that owns the destination bucket.

    " } }, @@ -206,7 +206,7 @@ } }, "AnalyticsConfiguration": { - "base": "

    Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.

    For more information, see GET Bucket analytics in the Amazon Simple Storage Service API Reference.

    ", + "base": "

    Specifies the configuration and any analyses for the analytics filter of an Amazon S3 bucket.

    ", "refs": { "AnalyticsConfigurationList$member": null, "GetBucketAnalyticsConfigurationOutput$AnalyticsConfiguration": "

    The configuration and any analyses for the analytics filter.

    ", @@ -226,7 +226,7 @@ } }, "AnalyticsFilter": { - "base": "

    ", + "base": "

    The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.

    ", "refs": { "AnalyticsConfiguration$Filter": "

    The filter used to describe a set of objects for analyses. A filter must have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no filter is provided, all objects will be considered in any analysis.

    " } @@ -241,7 +241,7 @@ } }, "AnalyticsS3BucketDestination": { - "base": "

    ", + "base": "

    Contains information about where to publish the analytics results.

    ", "refs": { "AnalyticsExportDestination$S3BucketDestination": "

    A destination signifying output to an S3 bucket.

    " } @@ -256,14 +256,14 @@ "base": null, "refs": { "GetObjectOutput$Body": "

    Object data.

    ", - "GetObjectTorrentOutput$Body": "

    ", + "GetObjectTorrentOutput$Body": "

    A Bencoded dictionary as defined by the BitTorrent specification

    ", "PutObjectRequest$Body": "

    Object data.

    ", "RecordsEvent$Payload": "

    The byte array of partial, one or more result records.

    ", "UploadPartRequest$Body": "

    Object data.

    " } }, "Bucket": { - "base": "

    ", + "base": "

    In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is globally unique, and the namespace is shared by all AWS accounts.

    ", "refs": { "Buckets$member": null } @@ -281,7 +281,7 @@ } }, "BucketAlreadyOwnedByYou": { - "base": "

    ", + "base": "

    The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

    ", "refs": { } }, @@ -295,20 +295,20 @@ "BucketLifecycleConfiguration": { "base": "

    Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For more information, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { - "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": "

    " + "PutBucketLifecycleConfigurationRequest$LifecycleConfiguration": "

    Container for lifecycle rules. You can add as many as 1,000 rules.

    " } }, "BucketLocationConstraint": { "base": null, "refs": { "CreateBucketConfiguration$LocationConstraint": "

    Specifies the region where the bucket will be created. If you don't specify a region, the bucket is created in US East (N. Virginia) Region (us-east-1).

    ", - "GetBucketLocationOutput$LocationConstraint": "

    " + "GetBucketLocationOutput$LocationConstraint": "

    Specifies the region where the bucket resides. For a list of all the Amazon S3 supported location constraints by region, see Regions and Endpoints.

    " } }, "BucketLoggingStatus": { - "base": "

    ", + "base": "

    Container for logging status information.

    ", "refs": { - "PutBucketLoggingRequest$BucketLoggingStatus": "

    " + "PutBucketLoggingRequest$BucketLoggingStatus": "

    Container for logging status information.

    " } }, "BucketLogsPermission": { @@ -320,103 +320,103 @@ "BucketName": { "base": null, "refs": { - "AbortMultipartUploadRequest$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", + "AbortMultipartUploadRequest$Bucket": "

    The bucket to which the upload was taking place.

    ", "AnalyticsS3BucketDestination$Bucket": "

    The Amazon Resource Name (ARN) of the bucket to which data is exported.

    ", "Bucket$Name": "

    The name of the bucket.

    ", - "CompleteMultipartUploadOutput$Bucket": "

    ", - "CompleteMultipartUploadRequest$Bucket": "

    ", - "CopyObjectRequest$Bucket": "

    ", - "CreateBucketRequest$Bucket": "

    ", + "CompleteMultipartUploadOutput$Bucket": "

    The name of the bucket that contains the newly created object.

    ", + "CompleteMultipartUploadRequest$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", + "CopyObjectRequest$Bucket": "

    The name of the destination bucket.

    ", + "CreateBucketRequest$Bucket": "

    The name of the bucket to create.

    ", "CreateMultipartUploadOutput$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", - "CreateMultipartUploadRequest$Bucket": "

    ", + "CreateMultipartUploadRequest$Bucket": "

    The name of the bucket to which to initiate the upload

    ", "DeleteBucketAnalyticsConfigurationRequest$Bucket": "

    The name of the bucket from which an analytics configuration is deleted.

    ", - "DeleteBucketCorsRequest$Bucket": "

    ", + "DeleteBucketCorsRequest$Bucket": "

    Specifies the bucket whose cors configuration is being deleted.

    ", "DeleteBucketEncryptionRequest$Bucket": "

    The name of the bucket containing the server-side encryption configuration to delete.

    ", "DeleteBucketInventoryConfigurationRequest$Bucket": "

    The name of the bucket containing the inventory configuration to delete.

    ", - "DeleteBucketLifecycleRequest$Bucket": "

    ", + "DeleteBucketLifecycleRequest$Bucket": "

    The bucket name of the lifecycle to delete.

    ", "DeleteBucketMetricsConfigurationRequest$Bucket": "

    The name of the bucket containing the metrics configuration to delete.

    ", - "DeleteBucketPolicyRequest$Bucket": "

    ", - "DeleteBucketReplicationRequest$Bucket": "

    The bucket name.

    It can take a while to propagate the deletion of a replication configuration to all Amazon S3 systems.

    ", - "DeleteBucketRequest$Bucket": "

    ", - "DeleteBucketTaggingRequest$Bucket": "

    ", - "DeleteBucketWebsiteRequest$Bucket": "

    ", - "DeleteObjectRequest$Bucket": "

    ", - "DeleteObjectTaggingRequest$Bucket": "

    ", - "DeleteObjectsRequest$Bucket": "

    ", + "DeleteBucketPolicyRequest$Bucket": "

    The bucket name.

    ", + "DeleteBucketReplicationRequest$Bucket": "

    The bucket name.

    ", + "DeleteBucketRequest$Bucket": "

    Specifies the bucket being deleted.

    ", + "DeleteBucketTaggingRequest$Bucket": "

    The bucket that has the tag set to be removed.

    ", + "DeleteBucketWebsiteRequest$Bucket": "

    The bucket name for which you want to remove the website configuration.

    ", + "DeleteObjectRequest$Bucket": "

    The bucket name of the bucket containing the object.

    ", + "DeleteObjectTaggingRequest$Bucket": "

    The bucket containing the objects from which to remove the tags.

    ", + "DeleteObjectsRequest$Bucket": "

    The bucket name containing the objects to delete.

    ", "DeletePublicAccessBlockRequest$Bucket": "

    The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.

    ", - "Destination$Bucket": "

    The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store replicas of the object identified by the rule.

    A replication configuration can replicate objects to only one destination bucket. If there are multiple rules in your replication configuration, all rules must specify the same destination bucket.

    ", + "Destination$Bucket": "

    The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store the results.

    ", "GetBucketAccelerateConfigurationRequest$Bucket": "

    Name of the bucket for which the accelerate configuration is retrieved.

    ", - "GetBucketAclRequest$Bucket": "

    ", + "GetBucketAclRequest$Bucket": "

    Specifies the S3 bucket whose ACL is being requested.

    ", "GetBucketAnalyticsConfigurationRequest$Bucket": "

    The name of the bucket from which an analytics configuration is retrieved.

    ", - "GetBucketCorsRequest$Bucket": "

    ", + "GetBucketCorsRequest$Bucket": "

    The bucket name for which to get the cors configuration.

    ", "GetBucketEncryptionRequest$Bucket": "

    The name of the bucket from which the server-side encryption configuration is retrieved.

    ", "GetBucketInventoryConfigurationRequest$Bucket": "

    The name of the bucket containing the inventory configuration to retrieve.

    ", - "GetBucketLifecycleConfigurationRequest$Bucket": "

    ", - "GetBucketLifecycleRequest$Bucket": "

    ", - "GetBucketLocationRequest$Bucket": "

    ", - "GetBucketLoggingRequest$Bucket": "

    ", + "GetBucketLifecycleConfigurationRequest$Bucket": "

    The name of the bucket for which to get the lifecycle information.

    ", + "GetBucketLifecycleRequest$Bucket": "

    The name of the bucket for which to get the lifecycle information.

    ", + "GetBucketLocationRequest$Bucket": "

    The name of the bucket for which to get the location.

    ", + "GetBucketLoggingRequest$Bucket": "

    The bucket name for which to get the logging information.

    ", "GetBucketMetricsConfigurationRequest$Bucket": "

    The name of the bucket containing the metrics configuration to retrieve.

    ", - "GetBucketNotificationConfigurationRequest$Bucket": "

    Name of the bucket to get the notification configuration for.

    ", - "GetBucketPolicyRequest$Bucket": "

    ", + "GetBucketNotificationConfigurationRequest$Bucket": "

    Name of the bucket for which to get the notification configuration.

    ", + "GetBucketPolicyRequest$Bucket": "

    The bucket name for which to get the bucket policy.

    ", "GetBucketPolicyStatusRequest$Bucket": "

    The name of the Amazon S3 bucket whose policy status you want to retrieve.

    ", - "GetBucketReplicationRequest$Bucket": "

    ", - "GetBucketRequestPaymentRequest$Bucket": "

    ", - "GetBucketTaggingRequest$Bucket": "

    ", - "GetBucketVersioningRequest$Bucket": "

    ", - "GetBucketWebsiteRequest$Bucket": "

    ", - "GetObjectAclRequest$Bucket": "

    ", + "GetBucketReplicationRequest$Bucket": "

    The bucket name for which to get the replication information.

    ", + "GetBucketRequestPaymentRequest$Bucket": "

    The name of the bucket for which to get the request payment configuration.

    ", + "GetBucketTaggingRequest$Bucket": "

    The name of the bucket for which to get the tagging information.

    ", + "GetBucketVersioningRequest$Bucket": "

    The name of the bucket for which to get the versioning information.

    ", + "GetBucketWebsiteRequest$Bucket": "

    The bucket name for which to get the website configuration.

    ", + "GetObjectAclRequest$Bucket": "

    The bucket name of the object for which to get the ACL information.

    ", "GetObjectLegalHoldRequest$Bucket": "

    The bucket containing the object whose Legal Hold status you want to retrieve.

    ", - "GetObjectLockConfigurationRequest$Bucket": "

    The bucket whose object lock configuration you want to retrieve.

    ", - "GetObjectRequest$Bucket": "

    ", + "GetObjectLockConfigurationRequest$Bucket": "

    The bucket whose Object Lock configuration you want to retrieve.

    ", + "GetObjectRequest$Bucket": "

    The bucket name containing the object.

    ", "GetObjectRetentionRequest$Bucket": "

    The bucket containing the object whose retention settings you want to retrieve.

    ", - "GetObjectTaggingRequest$Bucket": "

    ", - "GetObjectTorrentRequest$Bucket": "

    ", + "GetObjectTaggingRequest$Bucket": "

    The bucket name containing the object for which to get the tagging information.

    ", + "GetObjectTorrentRequest$Bucket": "

    The name of the bucket containing the object for which to get the torrent files.

    ", "GetPublicAccessBlockRequest$Bucket": "

    The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to retrieve.

    ", - "HeadBucketRequest$Bucket": "

    ", - "HeadObjectRequest$Bucket": "

    ", + "HeadBucketRequest$Bucket": "

    The bucket name.

    ", + "HeadObjectRequest$Bucket": "

    The name of the bucket containing the object.

    ", "InventoryS3BucketDestination$Bucket": "

    The Amazon resource name (ARN) of the bucket where inventory results will be published.

    ", "ListBucketAnalyticsConfigurationsRequest$Bucket": "

    The name of the bucket from which analytics configurations are retrieved.

    ", "ListBucketInventoryConfigurationsRequest$Bucket": "

    The name of the bucket containing the inventory configurations to retrieve.

    ", "ListBucketMetricsConfigurationsRequest$Bucket": "

    The name of the bucket containing the metrics configurations to retrieve.

    ", "ListMultipartUploadsOutput$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", - "ListMultipartUploadsRequest$Bucket": "

    ", - "ListObjectVersionsOutput$Name": "

    ", - "ListObjectVersionsRequest$Bucket": "

    ", - "ListObjectsOutput$Name": "

    ", - "ListObjectsRequest$Bucket": "

    ", - "ListObjectsV2Output$Name": "

    Name of the bucket to list.

    ", + "ListMultipartUploadsRequest$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", + "ListObjectVersionsOutput$Name": "

    The bucket name.

    ", + "ListObjectVersionsRequest$Bucket": "

    The name of the bucket that contains the objects.

    ", + "ListObjectsOutput$Name": "

    Name of the bucket.

    ", + "ListObjectsRequest$Bucket": "

    The name of the bucket containing the objects.

    ", + "ListObjectsV2Output$Name": "

    Name of the bucket.

    ", "ListObjectsV2Request$Bucket": "

    Name of the bucket to list.

    ", "ListPartsOutput$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    ", - "ListPartsRequest$Bucket": "

    ", + "ListPartsRequest$Bucket": "

    Name of the bucket to which the parts are being uploaded.

    ", "PutBucketAccelerateConfigurationRequest$Bucket": "

    Name of the bucket for which the accelerate configuration is set.

    ", - "PutBucketAclRequest$Bucket": "

    ", + "PutBucketAclRequest$Bucket": "

    The bucket to which to apply the ACL.

    ", "PutBucketAnalyticsConfigurationRequest$Bucket": "

    The name of the bucket to which an analytics configuration is stored.

    ", - "PutBucketCorsRequest$Bucket": "

    ", - "PutBucketEncryptionRequest$Bucket": "

    Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    ", + "PutBucketCorsRequest$Bucket": "

    Specifies the bucket impacted by the CORS configuration.

    ", + "PutBucketEncryptionRequest$Bucket": "

    Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS (SSE-KMS). For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.

    ", "PutBucketInventoryConfigurationRequest$Bucket": "

    The name of the bucket where the inventory configuration will be stored.

    ", - "PutBucketLifecycleConfigurationRequest$Bucket": "

    ", + "PutBucketLifecycleConfigurationRequest$Bucket": "

    The name of the bucket for which to set the configuration.

    ", "PutBucketLifecycleRequest$Bucket": "

    ", - "PutBucketLoggingRequest$Bucket": "

    ", + "PutBucketLoggingRequest$Bucket": "

    The name of the bucket for which to set the logging parameters.

    ", "PutBucketMetricsConfigurationRequest$Bucket": "

    The name of the bucket for which the metrics configuration is set.

    ", - "PutBucketNotificationConfigurationRequest$Bucket": "

    ", - "PutBucketNotificationRequest$Bucket": "

    ", - "PutBucketPolicyRequest$Bucket": "

    ", - "PutBucketReplicationRequest$Bucket": "

    ", - "PutBucketRequestPaymentRequest$Bucket": "

    ", - "PutBucketTaggingRequest$Bucket": "

    ", - "PutBucketVersioningRequest$Bucket": "

    ", - "PutBucketWebsiteRequest$Bucket": "

    ", - "PutObjectAclRequest$Bucket": "

    ", + "PutBucketNotificationConfigurationRequest$Bucket": "

    The name of the bucket.

    ", + "PutBucketNotificationRequest$Bucket": "

    The name of the bucket.

    ", + "PutBucketPolicyRequest$Bucket": "

    The name of the bucket.

    ", + "PutBucketReplicationRequest$Bucket": "

    The name of the bucket.

    ", + "PutBucketRequestPaymentRequest$Bucket": "

    The bucket name.

    ", + "PutBucketTaggingRequest$Bucket": "

    The bucket name.

    ", + "PutBucketVersioningRequest$Bucket": "

    The bucket name.

    ", + "PutBucketWebsiteRequest$Bucket": "

    The bucket name.

    ", + "PutObjectAclRequest$Bucket": "

    The name of the bucket to which the ACL is being added.

    ", "PutObjectLegalHoldRequest$Bucket": "

    The bucket containing the object that you want to place a Legal Hold on.

    ", - "PutObjectLockConfigurationRequest$Bucket": "

    The bucket whose object lock configuration you want to create or replace.

    ", + "PutObjectLockConfigurationRequest$Bucket": "

    The bucket whose Object Lock configuration you want to create or replace.

    ", "PutObjectRequest$Bucket": "

    Name of the bucket to which the PUT operation was initiated.

    ", "PutObjectRetentionRequest$Bucket": "

    The bucket that contains the object you want to apply this Object Retention configuration to.

    ", - "PutObjectTaggingRequest$Bucket": "

    ", + "PutObjectTaggingRequest$Bucket": "

    The bucket containing the object.

    ", "PutPublicAccessBlockRequest$Bucket": "

    The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to set.

    ", - "RestoreObjectRequest$Bucket": "

    ", + "RestoreObjectRequest$Bucket": "

    The bucket name.

    ", "S3Location$BucketName": "

    The name of the bucket where the restore results will be placed.

    ", "SelectObjectContentRequest$Bucket": "

    The S3 bucket.

    ", - "UploadPartCopyRequest$Bucket": "

    ", + "UploadPartCopyRequest$Bucket": "

    The bucket name.

    ", "UploadPartRequest$Bucket": "

    Name of the bucket to which the multipart upload was initiated.

    " } }, @@ -430,15 +430,15 @@ "Buckets": { "base": null, "refs": { - "ListBucketsOutput$Buckets": "

    " + "ListBucketsOutput$Buckets": "

    The list of buckets owned by the requestor.

    " } }, "BypassGovernanceRetention": { "base": null, "refs": { - "DeleteObjectRequest$BypassGovernanceRetention": "

    Indicates whether Amazon S3 object lock should bypass governance-mode restrictions to process this operation.

    ", - "DeleteObjectsRequest$BypassGovernanceRetention": "

    Specifies whether you want to delete this object even if it has a Governance-type object lock in place. You must have sufficient permissions to perform this operation.

    ", - "PutObjectRetentionRequest$BypassGovernanceRetention": "

    Indicates whether this operation should bypass Governance-mode restrictions.j

    " + "DeleteObjectRequest$BypassGovernanceRetention": "

    Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation.

    ", + "DeleteObjectsRequest$BypassGovernanceRetention": "

    Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. You must have sufficient permissions to perform this operation.

    ", + "PutObjectRetentionRequest$BypassGovernanceRetention": "

    Indicates whether this operation should bypass Governance-mode restrictions.

    " } }, "BytesProcessed": { @@ -465,7 +465,7 @@ "CORSConfiguration": { "base": "

    Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { - "PutBucketCorsRequest$CORSConfiguration": "

    " + "PutBucketCorsRequest$CORSConfiguration": "

    Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.

    " } }, "CORSRule": { @@ -477,18 +477,18 @@ "CORSRules": { "base": null, "refs": { - "CORSConfiguration$CORSRules": "

    A set of allowed origins and methods.

    ", - "GetBucketCorsOutput$CORSRules": "

    " + "CORSConfiguration$CORSRules": "

    A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.

    ", + "GetBucketCorsOutput$CORSRules": "

    A set of origins and methods (cross-origin access that you want to allow). You can add up to 100 rules to the configuration.

    " } }, "CSVInput": { - "base": "

    Describes how a CSV-formatted input object is formatted.

    ", + "base": "

    Describes how an uncompressed comma-separated values (CSV)-formatted input object is formatted.

    ", "refs": { "InputSerialization$CSV": "

    Describes the serialization of a CSV-encoded object.

    " } }, "CSVOutput": { - "base": "

    Describes how CSV-formatted results are formatted.

    ", + "base": "

    Describes how uncompressed comma-separated values (CSV)-formatted results are formatted.

    ", "refs": { "OutputSerialization$CSV": "

    Describes the serialization of CSV-encoded Select results.

    " } @@ -500,41 +500,41 @@ "CreateMultipartUploadRequest$CacheControl": "

    Specifies caching behavior along the request/reply chain.

    ", "GetObjectOutput$CacheControl": "

    Specifies caching behavior along the request/reply chain.

    ", "HeadObjectOutput$CacheControl": "

    Specifies caching behavior along the request/reply chain.

    ", - "PutObjectRequest$CacheControl": "

    Specifies caching behavior along the request/reply chain.

    " + "PutObjectRequest$CacheControl": "

    Can be used to specify caching behavior along the request/reply chain. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.

    " } }, "CloudFunction": { "base": null, "refs": { - "CloudFunctionConfiguration$CloudFunction": "

    " + "CloudFunctionConfiguration$CloudFunction": "

    Lambda cloud function ARN that Amazon S3 can invoke when it detects events of the specified type.

    " } }, "CloudFunctionConfiguration": { - "base": "

    ", + "base": "

    Container for specifying the AWS Lambda notification configuration.

    ", "refs": { - "NotificationConfigurationDeprecated$CloudFunctionConfiguration": "

    " + "NotificationConfigurationDeprecated$CloudFunctionConfiguration": "

    Container for specifying the AWS Lambda notification configuration.

    " } }, "CloudFunctionInvocationRole": { "base": null, "refs": { - "CloudFunctionConfiguration$InvocationRole": "

    " + "CloudFunctionConfiguration$InvocationRole": "

    The role supporting the invocation of the Lambda function.

    " } }, "Code": { "base": null, "refs": { - "Error$Code": "

    " + "Error$Code": "

    The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.

    Amazon S3 error codes

      • Code: AccessDenied

      • Description: Access Denied

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AccountProblem

      • Description: There is a problem with your AWS account that prevents the operation from completing successfully. Contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AllAccessDisabled

      • Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: AmbiguousGrantByEmailAddress

      • Description: The email address you provided is associated with more than one account.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: AuthorizationHeaderMalformed

      • Description: The authorization header you provided is invalid.

      • HTTP Status Code: 400 Bad Request

      • HTTP Status Code: N/A

      • Code: BadDigest

      • Description: The Content-MD5 you specified did not match what we received.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: BucketAlreadyExists

      • Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: BucketAlreadyOwnedByYou

      • Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).

      • HTTP Status Code: 409 Conflict (in all regions except the North Virginia region)

      • SOAP Fault Code Prefix: Client

      • Code: BucketNotEmpty

      • Description: The bucket you tried to delete is not empty.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: CredentialsNotSupported

      • Description: This request does not support credentials.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: CrossLocationLoggingProhibited

      • Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: EntityTooSmall

      • Description: Your proposed upload is smaller than the minimum allowed object size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: EntityTooLarge

      • Description: Your proposed upload exceeds the maximum allowed object size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: ExpiredToken

      • Description: The provided token has expired.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IllegalVersioningConfigurationException

      • Description: Indicates that the versioning configuration specified in the request is invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IncompleteBody

      • Description: You did not provide the number of bytes specified by the Content-Length HTTP header.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: IncorrectNumberOfFilesInPostRequest

      • Description: POST requires exactly one file upload per request.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InlineDataTooLarge

      • Description: Inline data exceeds the maximum allowed size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InternalError

      • Description: We encountered an internal error. Please try again.

      • HTTP Status Code: 500 Internal Server Error

      • SOAP Fault Code Prefix: Server

      • Code: InvalidAccessKeyId

      • Description: The AWS access key ID you provided does not exist in our records.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidAddressingHeader

      • Description: You must specify the Anonymous role.

      • HTTP Status Code: N/A

      • SOAP Fault Code Prefix: Client

      • Code: InvalidArgument

      • Description: Invalid Argument

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidBucketName

      • Description: The specified bucket is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidBucketState

      • Description: The request is not valid with the current state of the bucket.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: InvalidDigest

      • Description: The Content-MD5 you specified is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidEncryptionAlgorithmError

      • Description: The encryption request you specified is not valid. The valid value is AES256.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidLocationConstraint

      • Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidObjectState

      • Description: The operation is not valid for the current state of the object.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPart

      • Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPartOrder

      • Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPayer

      • Description: All access to this object has been disabled. Please contact AWS Support for further assistance.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidPolicyDocument

      • Description: The content of the form does not meet the conditions specified in the policy document.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRange

      • Description: The requested range cannot be satisfied.

      • HTTP Status Code: 416 Requested Range Not Satisfiable

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRequest

      • Description: Please use AWS4-HMAC-SHA256.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: SOAP requests must be made over an HTTPS connection.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate is not configured on this bucket.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Accelerate is disabled on this bucket.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidRequest

      • Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.

      • HTTP Status Code: 400 Bad Request

      • Code: N/A

      • Code: InvalidSecurity

      • Description: The provided security credentials are not valid.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: InvalidSOAPRequest

      • Description: The SOAP request body is invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidStorageClass

      • Description: The storage class you specified is not valid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidTargetBucketForLogging

      • Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidToken

      • Description: The provided token is malformed or otherwise invalid.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: InvalidURI

      • Description: Couldn't parse the specified URI.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: KeyTooLongError

      • Description: Your key is too long.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedACLError

      • Description: The XML you provided was not well-formed or did not validate against our published schema.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedPOSTRequest

      • Description: The body of your POST request is not well-formed multipart/form-data.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MalformedXML

      • Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MaxMessageLengthExceeded

      • Description: Your request was too big.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MaxPostPreDataLengthExceededError

      • Description: Your POST request fields preceding the upload file were too large.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MetadataTooLarge

      • Description: Your metadata headers exceed the maximum allowed metadata size.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MethodNotAllowed

      • Description: The specified method is not allowed against this resource.

      • HTTP Status Code: 405 Method Not Allowed

      • SOAP Fault Code Prefix: Client

      • Code: MissingAttachment

      • Description: A SOAP attachment was expected, but none were found.

      • HTTP Status Code: N/A

      • SOAP Fault Code Prefix: Client

      • Code: MissingContentLength

      • Description: You must provide the Content-Length HTTP header.

      • HTTP Status Code: 411 Length Required

      • SOAP Fault Code Prefix: Client

      • Code: MissingRequestBodyError

      • Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MissingSecurityElement

      • Description: The SOAP 1.1 request is missing a security element.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: MissingSecurityHeader

      • Description: Your request is missing a required header.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: NoLoggingStatusForKey

      • Description: There is no such thing as a logging status subresource for a key.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchBucket

      • Description: The specified bucket does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchBucketPolicy

      • Description: The specified bucket does not have a bucket policy.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchKey

      • Description: The specified key does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchLifecycleConfiguration

      • Description: The lifecycle configuration does not exist.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchUpload

      • Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NoSuchVersion

      • Description: Indicates that the version ID specified in the request does not match an existing version.

      • HTTP Status Code: 404 Not Found

      • SOAP Fault Code Prefix: Client

      • Code: NotImplemented

      • Description: A header you provided implies functionality that is not implemented.

      • HTTP Status Code: 501 Not Implemented

      • SOAP Fault Code Prefix: Server

      • Code: NotSignedUp

      • Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: OperationAborted

      • Description: A conflicting conditional operation is currently in progress against this resource. Try again.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: PermanentRedirect

      • Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.

      • HTTP Status Code: 301 Moved Permanently

      • SOAP Fault Code Prefix: Client

      • Code: PreconditionFailed

      • Description: At least one of the preconditions you specified did not hold.

      • HTTP Status Code: 412 Precondition Failed

      • SOAP Fault Code Prefix: Client

      • Code: Redirect

      • Description: Temporary redirect.

      • HTTP Status Code: 307 Moved Temporarily

      • SOAP Fault Code Prefix: Client

      • Code: RestoreAlreadyInProgress

      • Description: Object restore is already in progress.

      • HTTP Status Code: 409 Conflict

      • SOAP Fault Code Prefix: Client

      • Code: RequestIsNotMultiPartContent

      • Description: Bucket POST must be of the enclosure-type multipart/form-data.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: RequestTimeout

      • Description: Your socket connection to the server was not read from or written to within the timeout period.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: RequestTimeTooSkewed

      • Description: The difference between the request time and the server's time is too large.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: RequestTorrentOfBucketError

      • Description: Requesting the torrent file of a bucket is not permitted.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: SignatureDoesNotMatch

      • Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication for details.

      • HTTP Status Code: 403 Forbidden

      • SOAP Fault Code Prefix: Client

      • Code: ServiceUnavailable

      • Description: Reduce your request rate.

      • HTTP Status Code: 503 Service Unavailable

      • SOAP Fault Code Prefix: Server

      • Code: SlowDown

      • Description: Reduce your request rate.

      • HTTP Status Code: 503 Slow Down

      • SOAP Fault Code Prefix: Server

      • Code: TemporaryRedirect

      • Description: You are being redirected to the bucket while DNS updates.

      • HTTP Status Code: 307 Moved Temporarily

      • SOAP Fault Code Prefix: Client

      • Code: TokenRefreshRequired

      • Description: The provided token must be refreshed.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: TooManyBuckets

      • Description: You have attempted to create more buckets than allowed.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UnexpectedContent

      • Description: This request does not support content.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UnresolvableGrantByEmailAddress

      • Description: The email address you provided does not match any account on record.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

      • Code: UserKeyMustBeSpecified

      • Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.

      • HTTP Status Code: 400 Bad Request

      • SOAP Fault Code Prefix: Client

    " } }, "Comments": { "base": null, "refs": { - "CSVInput$Comments": "

    The single character used to indicate a row should be ignored when present at the start of a row.

    " + "CSVInput$Comments": "

    A single character used to indicate that a row should be ignored when the character is present at the start of that row. You can specify any character to indicate a comment line.

    " } }, "CommonPrefix": { - "base": "

    ", + "base": "

    Container for all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter. CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix. For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/.

    ", "refs": { "CommonPrefixList$member": null } @@ -542,10 +542,10 @@ "CommonPrefixList": { "base": null, "refs": { - "ListMultipartUploadsOutput$CommonPrefixes": "

    ", - "ListObjectVersionsOutput$CommonPrefixes": "

    ", - "ListObjectsOutput$CommonPrefixes": "

    ", - "ListObjectsV2Output$CommonPrefixes": "

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by delimiter

    " + "ListMultipartUploadsOutput$CommonPrefixes": "

    If you specify a delimiter in the request, then the result returns each distinct key prefix containing the delimiter in a CommonPrefixes element. The distinct key prefixes are returned in the Prefix child element.

    ", + "ListObjectVersionsOutput$CommonPrefixes": "

    All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

    ", + "ListObjectsOutput$CommonPrefixes": "

    All of the keys rolled up in a common prefix count as a single return when calculating the number of returns.

    A response can contain CommonPrefixes only if you specify a delimiter.

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by the delimiter.

    CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

    For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

    ", + "ListObjectsV2Output$CommonPrefixes": "

    All of the keys rolled up into a common prefix count as a single return when calculating the number of returns.

    A response can contain CommonPrefixes only if you specify a delimiter.

    CommonPrefixes contains all (if there are any) keys between Prefix and the next occurrence of the string specified by a delimiter.

    CommonPrefixes lists keys that act like subdirectories in the directory specified by Prefix.

    For example, if the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common prefix is notes/summer/. All of the keys that roll up into a common prefix count as a single return when calculating the number of returns.

    " } }, "CompleteMultipartUploadOutput": { @@ -559,13 +559,13 @@ } }, "CompletedMultipartUpload": { - "base": "

    ", + "base": "

    The container for the completed multipart upload details.

    ", "refs": { - "CompleteMultipartUploadRequest$MultipartUpload": "

    " + "CompleteMultipartUploadRequest$MultipartUpload": "

    The container for the multipart upload request information.

    " } }, "CompletedPart": { - "base": "

    ", + "base": "

    Details of the parts that were uploaded.

    ", "refs": { "CompletedPartList$member": null } @@ -573,7 +573,7 @@ "CompletedPartList": { "base": null, "refs": { - "CompletedMultipartUpload$Parts": "

    " + "CompletedMultipartUpload$Parts": "

    Array of CompletedPart data types.

    " } }, "CompressionType": { @@ -583,7 +583,7 @@ } }, "Condition": { - "base": "

    Specifies a condition that must be met for a redirect to apply.

    ", + "base": "

    A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.

    ", "refs": { "RoutingRule$Condition": "

    A container for describing a condition that must be met for the specified redirect to apply. For example, 1. If request is for pages in the /docs folder, redirect to the /documents folder. 2. If request results in HTTP error 4xx, redirect request to another host where you might process the error.

    " } @@ -601,7 +601,7 @@ "CreateMultipartUploadRequest$ContentDisposition": "

    Specifies presentational information for the object.

    ", "GetObjectOutput$ContentDisposition": "

    Specifies presentational information for the object.

    ", "HeadObjectOutput$ContentDisposition": "

    Specifies presentational information for the object.

    ", - "PutObjectRequest$ContentDisposition": "

    Specifies presentational information for the object.

    " + "PutObjectRequest$ContentDisposition": "

    Specifies presentational information for the object. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1.

    " } }, "ContentEncoding": { @@ -611,7 +611,7 @@ "CreateMultipartUploadRequest$ContentEncoding": "

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

    ", "GetObjectOutput$ContentEncoding": "

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

    ", "HeadObjectOutput$ContentEncoding": "

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

    ", - "PutObjectRequest$ContentEncoding": "

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field.

    " + "PutObjectRequest$ContentEncoding": "

    Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11.

    " } }, "ContentLanguage": { @@ -629,31 +629,31 @@ "refs": { "GetObjectOutput$ContentLength": "

    Size of the body in bytes.

    ", "HeadObjectOutput$ContentLength": "

    Size of the body in bytes.

    ", - "PutObjectRequest$ContentLength": "

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.

    ", + "PutObjectRequest$ContentLength": "

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13.

    ", "UploadPartRequest$ContentLength": "

    Size of the body in bytes. This parameter is useful when the size of the body cannot be determined automatically.

    " } }, "ContentMD5": { "base": null, "refs": { - "PutBucketAclRequest$ContentMD5": "

    ", - "PutBucketCorsRequest$ContentMD5": "

    ", + "PutBucketAclRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    ", + "PutBucketCorsRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    ", "PutBucketEncryptionRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the server-side encryption configuration. This parameter is auto-populated when using the command from the CLI.

    ", "PutBucketLifecycleRequest$ContentMD5": "

    ", - "PutBucketLoggingRequest$ContentMD5": "

    ", - "PutBucketNotificationRequest$ContentMD5": "

    ", - "PutBucketPolicyRequest$ContentMD5": "

    ", - "PutBucketReplicationRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit.

    ", - "PutBucketRequestPaymentRequest$ContentMD5": "

    ", - "PutBucketTaggingRequest$ContentMD5": "

    ", - "PutBucketVersioningRequest$ContentMD5": "

    ", - "PutBucketWebsiteRequest$ContentMD5": "

    ", - "PutObjectAclRequest$ContentMD5": "

    ", + "PutBucketLoggingRequest$ContentMD5": "

    The MD5 hash of the PutBucketLogging request body.

    ", + "PutBucketNotificationRequest$ContentMD5": "

    The MD5 hash of the PutBucketNotification request body.

    ", + "PutBucketPolicyRequest$ContentMD5": "

    The MD5 hash of the request body.

    ", + "PutBucketReplicationRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    ", + "PutBucketRequestPaymentRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    ", + "PutBucketTaggingRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    ", + "PutBucketVersioningRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    ", + "PutBucketWebsiteRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. You must use this header as a message integrity check to verify that the request body was not corrupted in transit. For more information, see RFC 1864.

    ", + "PutObjectAclRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message integrity check to verify that the request body was not corrupted in transit. For more information, go to RFC 1864.

    ", "PutObjectLegalHoldRequest$ContentMD5": "

    The MD5 hash for the request body.

    ", "PutObjectLockConfigurationRequest$ContentMD5": "

    The MD5 hash for the request body.

    ", - "PutObjectRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameted is required if object lock parameters are specified.

    ", + "PutObjectRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the message (without the headers) according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same data that was originally sent. Although it is optional, we recommend using the Content-MD5 mechanism as an end-to-end integrity check. For more information about REST request authentication, see REST Authentication.

    ", "PutObjectRetentionRequest$ContentMD5": "

    The MD5 hash for the request body.

    ", - "PutObjectTaggingRequest$ContentMD5": "

    ", + "PutObjectTaggingRequest$ContentMD5": "

    The MD5 hash for the request body.

    ", "PutPublicAccessBlockRequest$ContentMD5": "

    The MD5 hash of the PutPublicAccessBlock request body.

    ", "UploadPartRequest$ContentMD5": "

    The base64-encoded 128-bit MD5 digest of the part data. This parameter is auto-populated when using the command from the CLI. This parameter is required if object lock parameters are specified.

    " } @@ -671,7 +671,7 @@ "CreateMultipartUploadRequest$ContentType": "

    A standard MIME type describing the format of the object data.

    ", "GetObjectOutput$ContentType": "

    A standard MIME type describing the format of the object data.

    ", "HeadObjectOutput$ContentType": "

    A standard MIME type describing the format of the object data.

    ", - "PutObjectRequest$ContentType": "

    A standard MIME type describing the format of the object data.

    " + "PutObjectRequest$ContentType": "

    A standard MIME type describing the format of the contents. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17.

    " } }, "ContinuationEvent": { @@ -691,15 +691,15 @@ } }, "CopyObjectResult": { - "base": "

    ", + "base": "

    Container for all response elements.

    ", "refs": { - "CopyObjectOutput$CopyObjectResult": "

    " + "CopyObjectOutput$CopyObjectResult": "

    Container for all response elements.

    " } }, "CopyPartResult": { - "base": "

    ", + "base": "

    Container for all response elements.

    ", "refs": { - "UploadPartCopyOutput$CopyPartResult": "

    " + "UploadPartCopyOutput$CopyPartResult": "

    Container for all response elements.

    " } }, "CopySource": { @@ -767,14 +767,14 @@ "CopySourceVersionId": { "base": null, "refs": { - "CopyObjectOutput$CopySourceVersionId": "

    ", + "CopyObjectOutput$CopySourceVersionId": "

    Version of the copied object in the destination bucket.

    ", "UploadPartCopyOutput$CopySourceVersionId": "

    The version of the source object that was copied, if you have enabled versioning on the source bucket.

    " } }, "CreateBucketConfiguration": { - "base": "

    ", + "base": "

    The configuration information for the bucket.

    ", "refs": { - "CreateBucketRequest$CreateBucketConfiguration": "

    " + "CreateBucketRequest$CreateBucketConfiguration": "

    The configuration information for the bucket.

    " } }, "CreateBucketOutput": { @@ -807,7 +807,7 @@ "base": null, "refs": { "LifecycleExpiration$Date": "

    Indicates at what date the object is to be moved or deleted. Should be in GMT ISO 8601 Format.

    ", - "ObjectLockRetention$RetainUntilDate": "

    The date on which this object lock retention expires.

    ", + "ObjectLockRetention$RetainUntilDate": "

    The date on which this Object Lock Retention will expire.

    ", "Transition$Date": "

    Indicates when objects are transitioned to the specified storage class. The date value must be in ISO 8601 format. The time is always midnight UTC.

    " } }, @@ -829,15 +829,15 @@ } }, "DefaultRetention": { - "base": "

    The container element for specifying the default object lock retention settings for new objects placed in the specified bucket.

    ", + "base": "

    The container element for specifying the default Object Lock retention settings for new objects placed in the specified bucket.

    ", "refs": { "ObjectLockRule$DefaultRetention": "

    The default retention period that you want to apply to new objects placed in the specified bucket.

    " } }, "Delete": { - "base": "

    ", + "base": "

    Container for the objects to delete.

    ", "refs": { - "DeleteObjectsRequest$Delete": "

    " + "DeleteObjectsRequest$Delete": "

    Container for the request.

    " } }, "DeleteBucketAnalyticsConfigurationRequest": { @@ -899,39 +899,39 @@ "base": null, "refs": { "DeleteObjectOutput$DeleteMarker": "

    Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.

    ", - "DeletedObject$DeleteMarker": "

    ", + "DeletedObject$DeleteMarker": "

    Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created.

    ", "GetObjectOutput$DeleteMarker": "

    Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.

    ", "HeadObjectOutput$DeleteMarker": "

    Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.

    " } }, "DeleteMarkerEntry": { - "base": "

    ", + "base": "

    Information about the delete marker.

    ", "refs": { "DeleteMarkers$member": null } }, "DeleteMarkerReplication": { - "base": "

    Specifies whether Amazon S3 should replicate delete makers.

    ", + "base": "

    Specifies whether Amazon S3 replicates the delete markers. If you specify a Filter, you must specify this element. However, in the latest version of replication configuration (when Filter is specified), Amazon S3 doesn't replicate delete markers. Therefore, the DeleteMarkerReplication element can contain only <Status>Disabled</Status>. For an example configuration, see Basic Rule Configuration.

    If you don't specify the Filter element, Amazon S3 assumes the replication configuration is the earlier version, V1. In the earlier version, Amazon S3 handled replication of delete markers differently. For more information, see Backward Compatibility.

    ", "refs": { - "ReplicationRule$DeleteMarkerReplication": "

    " + "ReplicationRule$DeleteMarkerReplication": null } }, "DeleteMarkerReplicationStatus": { "base": null, "refs": { - "DeleteMarkerReplication$Status": "

    The status of the delete marker replication.

    In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.

    " + "DeleteMarkerReplication$Status": "

    Indicates whether to replicate delete markers.

    In the current implementation, Amazon S3 doesn't replicate the delete markers. The status must be Disabled.

    " } }, "DeleteMarkerVersionId": { "base": null, "refs": { - "DeletedObject$DeleteMarkerVersionId": "

    " + "DeletedObject$DeleteMarkerVersionId": "

    The version ID of the delete marker created as a result of the DELETE operation. If you delete a specific object version, the value returned by this header is the version ID of the object version deleted.

    " } }, "DeleteMarkers": { "base": null, "refs": { - "ListObjectVersionsOutput$DeleteMarkers": "

    " + "ListObjectVersionsOutput$DeleteMarkers": "

    Container for an object that is a delete marker.

    " } }, "DeleteObjectOutput": { @@ -970,7 +970,7 @@ } }, "DeletedObject": { - "base": "

    ", + "base": "

    Information about the deleted object.

    ", "refs": { "DeletedObjects$member": null } @@ -978,19 +978,19 @@ "DeletedObjects": { "base": null, "refs": { - "DeleteObjectsOutput$Deleted": "

    " + "DeleteObjectsOutput$Deleted": "

    Container element for a successful delete. It identifies the object that was successfully deleted.

    " } }, "Delimiter": { "base": null, "refs": { - "ListMultipartUploadsOutput$Delimiter": "

    ", - "ListMultipartUploadsRequest$Delimiter": "

    Character you use to group keys.

    ", - "ListObjectVersionsOutput$Delimiter": "

    ", - "ListObjectVersionsRequest$Delimiter": "

    A delimiter is a character you use to group keys.

    ", - "ListObjectsOutput$Delimiter": "

    ", + "ListMultipartUploadsOutput$Delimiter": "

    Contains the delimiter you specified in the request. If you don't specify a delimiter in your request, this element is absent from the response.

    ", + "ListMultipartUploadsRequest$Delimiter": "

    Character you use to group keys.

    All keys that contain the same string between the prefix, if specified, and the first occurrence of the delimiter after the prefix are grouped under a single result element, CommonPrefixes. If you don't specify the prefix parameter, then the substring starts at the beginning of the key. The keys that are grouped under the CommonPrefixes result element are not returned elsewhere in the response.

    ", + "ListObjectVersionsOutput$Delimiter": "

    The delimiter grouping the included keys. A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.

    ", + "ListObjectVersionsRequest$Delimiter": "

    A delimiter is a character that you specify to group keys. All keys that contain the same string between the prefix and the first occurrence of the delimiter are grouped under a single result element in CommonPrefixes. These groups are counted as one result against the max-keys limitation. These keys are not returned elsewhere in the response.

    ", + "ListObjectsOutput$Delimiter": "

    Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.

    ", "ListObjectsRequest$Delimiter": "

    A delimiter is a character you use to group keys.

    ", - "ListObjectsV2Output$Delimiter": "

    A delimiter is a character you use to group keys.

    ", + "ListObjectsV2Output$Delimiter": "

    Causes keys that contain the same string between the prefix and the first occurrence of the delimiter to be rolled up into a single result element in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere in the response. Each rolled-up result counts as only one return against the MaxKeys value.

    ", "ListObjectsV2Request$Delimiter": "

    A delimiter is a character you use to group keys.

    " } }, @@ -1011,20 +1011,20 @@ "refs": { "Grantee$DisplayName": "

    Screen name of the grantee.

    ", "Initiator$DisplayName": "

    Name of the Principal.

    ", - "Owner$DisplayName": "

    " + "Owner$DisplayName": "

    Container for the display name of the owner.

    " } }, "ETag": { "base": null, "refs": { - "CompleteMultipartUploadOutput$ETag": "

    Entity tag of the object.

    ", + "CompleteMultipartUploadOutput$ETag": "

    Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.

    ", "CompletedPart$ETag": "

    Entity tag returned when the part was uploaded.

    ", - "CopyObjectResult$ETag": "

    ", + "CopyObjectResult$ETag": "

    Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata. The source and destination ETag is identical for a successfully copied object.

    ", "CopyPartResult$ETag": "

    Entity tag of the object.

    ", "GetObjectOutput$ETag": "

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL

    ", "HeadObjectOutput$ETag": "

    An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL

    ", - "Object$ETag": "

    ", - "ObjectVersion$ETag": "

    ", + "Object$ETag": "

    The entity tag is an MD5 hash of the object. ETag reflects only changes to the contents of an object, not its metadata.

    ", + "ObjectVersion$ETag": "

    The entity tag is an MD5 hash of that version of the object

    ", "Part$ETag": "

    Entity tag returned when the part was uploaded.

    ", "PutObjectOutput$ETag": "

    Entity tag for the uploaded object.

    ", "UploadPartOutput$ETag": "

    Entity tag for the uploaded object.

    " @@ -1045,20 +1045,20 @@ "EncodingType": { "base": "

    Requests Amazon S3 to encode the object keys in the response and specifies the encoding method to use. An object key may contain any Unicode character; however, XML 1.0 parser cannot parse some characters, such as characters with an ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you can add this parameter to request that Amazon S3 encode the keys in the response.

    ", "refs": { - "ListMultipartUploadsOutput$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    ", + "ListMultipartUploadsOutput$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    Delimiter, KeyMarker, Prefix, NextKeyMarker, Key.

    ", "ListMultipartUploadsRequest$EncodingType": null, - "ListObjectVersionsOutput$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    ", + "ListObjectVersionsOutput$EncodingType": "

    Encoding type used by Amazon S3 to encode object key names in the XML response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.

    ", "ListObjectVersionsRequest$EncodingType": null, "ListObjectsOutput$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    ", "ListObjectsRequest$EncodingType": null, - "ListObjectsV2Output$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    ", + "ListObjectsV2Output$EncodingType": "

    Encoding type used by Amazon S3 to encode object key names in the XML response.

    If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

    Delimiter, Prefix, Key, and StartAfter.
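
    When the encoding-type=url request parameter is used, these elements come back URL-encoded. A small Go sketch of where decoding would happen (whether net/url's QueryUnescape matches S3's encoding for every character is an assumption here; the key is a made-up example):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        encodedKey := "photos/summer%20%2B%20fall/01.jpg" // as it might appear in the XML response
        key, err := url.QueryUnescape(encodedKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(key) // photos/summer + fall/01.jpg
    }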

    ", "ListObjectsV2Request$EncodingType": "

    Encoding type used by Amazon S3 to encode object keys in the response.

    " } }, "Encryption": { - "base": "

    Describes the server-side encryption that will be applied to the restore results.

    ", + "base": "

    Contains the type of server-side encryption used.

    ", "refs": { - "S3Location$Encryption": "

    " + "S3Location$Encryption": null } }, "EncryptionConfiguration": { @@ -1070,32 +1070,32 @@ "End": { "base": null, "refs": { - "ScanRange$End": "

    Specifies the end of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is one less than the size of the object being queried.

    " + "ScanRange$End": "

    Specifies the end of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is one less than the size of the object being queried. If only the End parameter is supplied, it is interpreted to mean scan the last N bytes of the file. For example; <scanrange><end>50</end></scanrange> means scan the last 50 bytes.
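
    A sketch of the two forms, assuming the generated s3.ScanRange struct exposes Start and End as *int64 pointers mirroring the model:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        // Explicit byte range: scan bytes 0 through 100 of the object.
        explicit := s3.ScanRange{Start: aws.Int64(0), End: aws.Int64(100)}

        // End only: interpreted as "scan the last 50 bytes" of the object.
        lastFifty := s3.ScanRange{End: aws.Int64(50)}

        fmt.Println(*explicit.End, *lastFifty.End) // 100 50
    }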

    " } }, "EndEvent": { - "base": "

    ", + "base": "

    A message that indicates the request is complete and no more messages will be sent. You should not assume that the request is complete until the client receives an EndEvent.

    ", "refs": { "SelectObjectContentEventStream$End": "

    The End Event.

    " } }, "Error": { - "base": "

    ", + "base": "

    Container for all error elements.

    ", "refs": { "Errors$member": null } }, "ErrorDocument": { - "base": "

    ", + "base": "

    The error information.

    ", "refs": { - "GetBucketWebsiteOutput$ErrorDocument": "

    ", + "GetBucketWebsiteOutput$ErrorDocument": "

    The name of the error document for the website.

    ", "WebsiteConfiguration$ErrorDocument": "

    The name of the error document for the website.

    " } }, "Errors": { "base": null, "refs": { - "DeleteObjectsOutput$Errors": "

    " + "DeleteObjectsOutput$Errors": "

    Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.

    " } }, "Event": { @@ -1110,12 +1110,24 @@ "EventList": { "base": null, "refs": { - "CloudFunctionConfiguration$Events": "

    ", + "CloudFunctionConfiguration$Events": "

    Bucket events for which to send notifications.

    ", "LambdaFunctionConfiguration$Events": "

    The Amazon S3 bucket event for which to invoke the AWS Lambda function. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

    ", - "QueueConfiguration$Events": "

    ", - "QueueConfigurationDeprecated$Events": "

    ", + "QueueConfiguration$Events": "

    A collection of bucket events for which to send notifications.

    ", + "QueueConfigurationDeprecated$Events": "

    A collection of bucket events for which to send notifications.

    ", "TopicConfiguration$Events": "

    The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.

    ", - "TopicConfigurationDeprecated$Events": "

    " + "TopicConfigurationDeprecated$Events": "

    A collection of events related to objects

    " + } + }, + "ExistingObjectReplication": { + "base": "

    A container that specifies information about existing object replication. You can choose whether to enable or disable the replication of existing objects.

    ", + "refs": { + "ReplicationRule$ExistingObjectReplication": "

    A container that specifies information about existing object replication. You can choose whether to enable or disable the replication of existing objects.

    " + } + }, + "ExistingObjectReplicationStatus": { + "base": null, + "refs": { + "ExistingObjectReplication$Status": "

    Specifies whether existing object replication is enabled.

    " } }, "Expiration": { @@ -1125,7 +1137,7 @@ "CopyObjectOutput$Expiration": "

    If the object expiration is configured, the response includes this header.

    ", "GetObjectOutput$Expiration": "

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", "HeadObjectOutput$Expiration": "

    If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key value pairs providing object expiration information. The value of the rule-id is URL encoded.

    ", - "PutObjectOutput$Expiration": "

    If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.

    " + "PutObjectOutput$Expiration": "

    If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL encoded.
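
    A parsing sketch for a header value of this shape; the sample value and the simple split-based approach are illustrative only:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    func main() {
        header := `expiry-date="Fri, 21 Dec 2029 00:00:00 GMT", rule-id="expire%20old%20logs"`

        fields := map[string]string{}
        // Split on `", ` so the comma inside the RFC 1123 date is not treated as a separator.
        for _, part := range strings.Split(header, `", `) {
            kv := strings.SplitN(part, `="`, 2)
            if len(kv) != 2 {
                continue
            }
            fields[kv[0]] = strings.Trim(kv[1], `"`)
        }

        ruleID, _ := url.QueryUnescape(fields["rule-id"]) // rule-id is URL encoded
        fmt.Println(fields["expiry-date"], "|", ruleID)
    }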

    " } }, "ExpirationStatus": { @@ -1148,7 +1160,7 @@ "CreateMultipartUploadRequest$Expires": "

    The date and time at which the object is no longer cacheable.

    ", "GetObjectOutput$Expires": "

    The date and time at which the object is no longer cacheable.

    ", "HeadObjectOutput$Expires": "

    The date and time at which the object is no longer cacheable.

    ", - "PutObjectRequest$Expires": "

    The date and time at which the object is no longer cacheable.

    " + "PutObjectRequest$Expires": "

    The date and time at which the object is no longer cacheable. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21.

    " } }, "ExposeHeader": { @@ -1186,14 +1198,14 @@ "FieldDelimiter": { "base": null, "refs": { - "CSVInput$FieldDelimiter": "

    The value used to separate individual fields in a record.

    ", - "CSVOutput$FieldDelimiter": "

    The value used to separate individual fields in a record.

    " + "CSVInput$FieldDelimiter": "

    A single character used to separate individual fields in a record. You can specify an arbitrary delimiter.

    ", + "CSVOutput$FieldDelimiter": "

    The value used to separate individual fields in a record. You can specify an arbitrary delimiter.

    " } }, "FileHeaderInfo": { "base": null, "refs": { - "CSVInput$FileHeaderInfo": "

    Describes the first line of input. Valid values: None, Ignore, Use.

    " + "CSVInput$FileHeaderInfo": "

    Describes the first line of input. Valid values are:

    • NONE: First line is not a header.

    • IGNORE: First line is a header, but you can't use the header values to indicate the column in an expression. You can use column position (such as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s).

    • USE: First line is a header, and you can use the header value to identify a column in an expression (SELECT \"name\" FROM OBJECT).
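
    A sketch of selecting between these modes, assuming the generated s3.CSVInput type and the FileHeaderInfo enum constants (FileHeaderInfoUse, FileHeaderInfoIgnore) follow the SDK's usual codegen naming:

    package main

    import "github.com/aws/aws-sdk-go-v2/service/s3"

    func main() {
        // USE: header names such as s."name" can appear in the SELECT expression.
        withHeader := s3.CSVInput{FileHeaderInfo: s3.FileHeaderInfoUse}

        // IGNORE: only positional references such as s._1, s._2 are valid.
        positional := s3.CSVInput{FileHeaderInfo: s3.FileHeaderInfoIgnore}

        _, _ = withHeader, positional
    }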

    " } }, "FilterRule": { @@ -1205,7 +1217,7 @@ "FilterRuleList": { "base": "

    A list of containers for the key value pair that defines the criteria for the filter rule.

    ", "refs": { - "S3KeyFilter$FilterRules": "

    " + "S3KeyFilter$FilterRules": null } }, "FilterRuleName": { @@ -1486,13 +1498,13 @@ } }, "GlacierJobParameters": { - "base": "

    ", + "base": "

    Container for Glacier job parameters.

    ", "refs": { "RestoreRequest$GlacierJobParameters": "

    Glacier related parameters pertaining to this job. Do not use with restores that specify OutputLocation.

    " } }, "Grant": { - "base": "

    ", + "base": "

    Container for grant information.

    ", "refs": { "Grants$member": null } @@ -1550,10 +1562,10 @@ } }, "Grantee": { - "base": "

    ", + "base": "

    Container for the person being granted permissions.

    ", "refs": { - "Grant$Grantee": "

    ", - "TargetGrant$Grantee": "

    " + "Grant$Grantee": "

    The person being granted permissions.

    ", + "TargetGrant$Grantee": "

    Container for the person being granted permissions.

    " } }, "Grants": { @@ -1605,7 +1617,7 @@ "Grantee$ID": "

    The canonical user ID of the grantee.

    ", "Initiator$ID": "

    If the principal is an AWS account, it provides the Canonical User ID. If the principal is an IAM User, it provides a user ARN value.

    ", "LifecycleRule$ID": "

    Unique identifier for the rule. The value cannot be longer than 255 characters.

    ", - "Owner$ID": "

    ", + "Owner$ID": "

    Container for the ID of the owner

    ", "ReplicationRule$ID": "

    A unique identifier for the rule. The maximum value is 255 characters.

    ", "Rule$ID": "

    Unique identifier for the rule. The value can't be longer than 255 characters.

    " } @@ -1639,9 +1651,9 @@ } }, "IndexDocument": { - "base": "

    ", + "base": "

    Container for the Suffix element.

    ", "refs": { - "GetBucketWebsiteOutput$IndexDocument": "

    ", + "GetBucketWebsiteOutput$IndexDocument": "

    The name of the index document for the website.

    ", "WebsiteConfiguration$IndexDocument": "

    The name of the index document for the website.

    " } }, @@ -1652,9 +1664,9 @@ } }, "Initiator": { - "base": "

    ", + "base": "

    Container element that identifies who initiated the multipart upload.

    ", "refs": { - "ListPartsOutput$Initiator": "

    Identifies who initiated the multipart upload.

    ", + "ListPartsOutput$Initiator": "

    Container element that identifies who initiated the multipart upload. If the initiator is an AWS account, this element provides the same information as the Owner element. If the initiator is an IAM User, then this element provides the user ARN and display name.

    ", "MultipartUpload$Initiator": "

    Identifies who initiated the multipart upload.

    " } }, @@ -1680,7 +1692,7 @@ } }, "InventoryDestination": { - "base": "

    ", + "base": "

    Specifies the inventory configuration for an Amazon S3 bucket.

    ", "refs": { "InventoryConfiguration$Destination": "

    Contains information about where to publish the inventory results.

    " } @@ -1692,7 +1704,7 @@ } }, "InventoryFilter": { - "base": "

    ", + "base": "

    Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.

    ", "refs": { "InventoryConfiguration$Filter": "

    Specifies an inventory filter. The inventory only includes objects that meet the filter's criteria.

    " } @@ -1737,13 +1749,13 @@ } }, "InventoryS3BucketDestination": { - "base": "

    ", + "base": "

    Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.

    ", "refs": { "InventoryDestination$S3BucketDestination": "

    Contains the bucket name, file format, bucket owner (optional), and prefix (optional) where inventory results are published.

    " } }, "InventorySchedule": { - "base": "

    ", + "base": "

    Specifies the schedule for generating inventory results.

    ", "refs": { "InventoryConfiguration$Schedule": "

    Specifies the schedule for generating inventory results.

    " } @@ -1771,23 +1783,23 @@ "base": null, "refs": { "ListBucketAnalyticsConfigurationsOutput$IsTruncated": "

    Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

    ", - "ListBucketInventoryConfigurationsOutput$IsTruncated": "

    Indicates whether the returned list of inventory configurations is truncated in this response. A value of true indicates that the list is truncated.

    ", + "ListBucketInventoryConfigurationsOutput$IsTruncated": "

    Tells whether the returned list of inventory configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken is provided for a subsequent request.

    ", "ListBucketMetricsConfigurationsOutput$IsTruncated": "

    Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.

    ", "ListMultipartUploadsOutput$IsTruncated": "

    Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.

    ", "ListObjectVersionsOutput$IsTruncated": "

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria. If your results were truncated, you can make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker response parameters as a starting place in another request to return the rest of the results.

    ", "ListObjectsOutput$IsTruncated": "

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

    ", - "ListObjectsV2Output$IsTruncated": "

    A flag that indicates whether or not Amazon S3 returned all of the results that satisfied the search criteria.

    ", - "ListPartsOutput$IsTruncated": "

    Indicates whether the returned list of parts is truncated.

    " + "ListObjectsV2Output$IsTruncated": "

    Set to false if all of the results were returned. Set to true if more keys are available to return. If the number of results exceeds that specified by MaxKeys, all of the results might not be returned.
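
    A pagination sketch using this SDK's v0.x Request/Send pattern (the promotion of output fields such as IsTruncated and NextContinuationToken onto the response value is assumed from the v0.x codegen; the bucket name is a placeholder):

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            panic(err)
        }
        svc := s3.New(cfg)

        var token *string
        for {
            req := svc.ListObjectsV2Request(&s3.ListObjectsV2Input{
                Bucket:            aws.String("my-bucket"), // placeholder
                MaxKeys:           aws.Int64(1000),
                ContinuationToken: token,
            })
            resp, err := req.Send(context.TODO())
            if err != nil {
                panic(err)
            }
            for _, obj := range resp.Contents {
                fmt.Println(aws.StringValue(obj.Key))
            }
            // IsTruncated false means the listing is complete.
            if !aws.BoolValue(resp.IsTruncated) {
                break
            }
            token = resp.NextContinuationToken
        }
    }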

    ", + "ListPartsOutput$IsTruncated": "

    Indicates whether the returned list of parts is truncated. A true value indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts element.

    " } }, "JSONInput": { - "base": "

    ", + "base": "

    Specifies JSON as object's input serialization format.

    ", "refs": { "InputSerialization$JSON": "

    Specifies JSON as object's input serialization format.

    " } }, "JSONOutput": { - "base": "

    ", + "base": "

    Specifies JSON as request's output serialization format.

    ", "refs": { "OutputSerialization$JSON": "

    Specifies JSON as request's output serialization format.

    " } @@ -1814,7 +1826,7 @@ "base": null, "refs": { "ListMultipartUploadsOutput$KeyMarker": "

    The key at or after which the listing began.

    ", - "ListMultipartUploadsRequest$KeyMarker": "

    Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.

    ", + "ListMultipartUploadsRequest$KeyMarker": "

    Together with upload-id-marker, this parameter specifies the multipart upload after which listing should begin.

    If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.

    If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included, provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
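
    An SDK-independent Go illustration of these marker semantics (the after helper and the sample uploads are hypothetical):

    package main

    import "fmt"

    type upload struct{ key, uploadID string }

    // after keeps uploads whose key is lexicographically greater than keyMarker,
    // plus uploads for the marker key itself when a non-empty uploadIDMarker is
    // given and their upload ID is greater than it.
    func after(all []upload, keyMarker, uploadIDMarker string) (out []upload) {
        for _, u := range all {
            switch {
            case u.key > keyMarker:
                out = append(out, u)
            case u.key == keyMarker && uploadIDMarker != "" && u.uploadID > uploadIDMarker:
                out = append(out, u)
            }
        }
        return
    }

    func main() {
        all := []upload{{"a.txt", "id1"}, {"a.txt", "id9"}, {"b.txt", "id2"}}
        fmt.Println(after(all, "a.txt", ""))    // [{b.txt id2}]
        fmt.Println(after(all, "a.txt", "id1")) // [{a.txt id9} {b.txt id2}]
    }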

    ", "ListObjectVersionsOutput$KeyMarker": "

    Marks the last Key returned in a truncated response.

    ", "ListObjectVersionsRequest$KeyMarker": "

    Specifies the key to start with when listing objects in a bucket.

    " } @@ -1846,31 +1858,31 @@ "LastModified": { "base": null, "refs": { - "CopyObjectResult$LastModified": "

    ", + "CopyObjectResult$LastModified": "

    Returns the date that the object was last modified.

    ", "CopyPartResult$LastModified": "

    Date and time at which the object was uploaded.

    ", "DeleteMarkerEntry$LastModified": "

    Date and time the object was last modified.

    ", "GetObjectOutput$LastModified": "

    Last modified date of the object

    ", "HeadObjectOutput$LastModified": "

    Last modified date of the object

    ", - "Object$LastModified": "

    ", + "Object$LastModified": "

    The date the Object was Last Modified

    ", "ObjectVersion$LastModified": "

    Date and time the object was last modified.

    ", "Part$LastModified": "

    Date and time at which the part was uploaded.

    " } }, "LifecycleConfiguration": { - "base": "

    ", + "base": "

    Container for lifecycle rules. You can add as many as 1000 rules.

    ", "refs": { "PutBucketLifecycleRequest$LifecycleConfiguration": "

    " } }, "LifecycleExpiration": { - "base": "

    ", + "base": "

    Container for the expiration for the lifecycle of the object.

    ", "refs": { - "LifecycleRule$Expiration": "

    ", - "Rule$Expiration": "

    " + "LifecycleRule$Expiration": "

    Specifies the expiration for the lifecycle of the object in the form of date, days, and whether the object has a delete marker.

    ", + "Rule$Expiration": "

    Specifies the expiration for the lifecycle of the object.

    " } }, "LifecycleRule": { - "base": "

    ", + "base": "

    A lifecycle rule for individual objects in an Amazon S3 bucket.

    ", "refs": { "LifecycleRules$member": null } @@ -1878,20 +1890,20 @@ "LifecycleRuleAndOperator": { "base": "

    This is used in a Lifecycle Rule Filter to apply a logical AND to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the And operator.

    ", "refs": { - "LifecycleRuleFilter$And": "

    " + "LifecycleRuleFilter$And": null } }, "LifecycleRuleFilter": { "base": "

    The Filter is used to identify objects that a Lifecycle Rule applies to. A Filter must have exactly one of Prefix, Tag, or And specified.

    ", "refs": { - "LifecycleRule$Filter": "

    " + "LifecycleRule$Filter": null } }, "LifecycleRules": { "base": null, "refs": { "BucketLifecycleConfiguration$Rules": "

    A lifecycle rule for individual objects in an Amazon S3 bucket.

    ", - "GetBucketLifecycleConfigurationOutput$Rules": "

    " + "GetBucketLifecycleConfigurationOutput$Rules": "

    Container for a lifecycle rule.

    " } }, "ListBucketAnalyticsConfigurationsOutput": { @@ -1982,8 +1994,8 @@ "Location": { "base": null, "refs": { - "CompleteMultipartUploadOutput$Location": "

    ", - "CreateBucketOutput$Location": "

    " + "CompleteMultipartUploadOutput$Location": "

    The URI that identifies the newly created object.

    ", + "CreateBucketOutput$Location": "

    Specifies the region where the bucket will be created. If you are creating a bucket in the US East (N. Virginia) region (us-east-1), you do not need to specify the location.

    " } }, "LocationPrefix": { @@ -1995,15 +2007,15 @@ "LoggingEnabled": { "base": "

    Describes where logs are stored and the prefix that Amazon S3 assigns to all log object keys for a bucket. For more information, see PUT Bucket logging in the Amazon Simple Storage Service API Reference.

    ", "refs": { - "BucketLoggingStatus$LoggingEnabled": "

    ", - "GetBucketLoggingOutput$LoggingEnabled": "

    " + "BucketLoggingStatus$LoggingEnabled": null, + "GetBucketLoggingOutput$LoggingEnabled": null } }, "MFA": { "base": null, "refs": { - "DeleteObjectRequest$MFA": "

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

    ", - "DeleteObjectsRequest$MFA": "

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

    ", + "DeleteObjectRequest$MFA": "

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA Delete enabled.

    ", + "DeleteObjectsRequest$MFA": "

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device. Required to permanently delete a versioned object if versioning is configured with MFA Delete enabled.
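
    The header value is simply the device serial number and the current token joined by one space; a tiny sketch with made-up placeholder values:

    package main

    import "fmt"

    func main() {
        serial := "arn:aws:iam::123456789012:mfa/user" // placeholder device serial/ARN
        token := "123456"                              // placeholder code shown on the device
        mfa := fmt.Sprintf("%s %s", serial, token)
        fmt.Println(mfa)
    }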

    ", "PutBucketVersioningRequest$MFA": "

    The concatenation of the authentication device's serial number, a space, and the value that is displayed on your authentication device.

    " } }, @@ -2022,7 +2034,7 @@ "Marker": { "base": null, "refs": { - "ListObjectsOutput$Marker": "

    ", + "ListObjectsOutput$Marker": "

    Indicates where in the bucket listing begins. Marker is included in the response if it was sent with the request.

    ", "ListObjectsRequest$Marker": "

    Specifies the key to start with when listing objects in a bucket.

    " } }, @@ -2035,9 +2047,9 @@ "MaxKeys": { "base": null, "refs": { - "ListObjectVersionsOutput$MaxKeys": "

    ", - "ListObjectVersionsRequest$MaxKeys": "

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", - "ListObjectsOutput$MaxKeys": "

    ", + "ListObjectVersionsOutput$MaxKeys": "

    Specifies the maximum number of objects to return.

    ", + "ListObjectVersionsRequest$MaxKeys": "

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.

    ", + "ListObjectsOutput$MaxKeys": "

    The maximum number of keys returned in the response body.

    ", "ListObjectsRequest$MaxKeys": "

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", "ListObjectsV2Output$MaxKeys": "

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    ", "ListObjectsV2Request$MaxKeys": "

    Sets the maximum number of keys returned in the response. The response might contain fewer keys but will never contain more.

    " @@ -2060,7 +2072,7 @@ "Message": { "base": null, "refs": { - "Error$Message": "

    " + "Error$Message": "

    The error message contains a generic description of the error condition in English. It is intended for a human audience. Simple programs display the message directly to the end user if they encounter an error condition they don't know how or don't care to handle. Sophisticated programs with more exhaustive error handling and proper internationalization are more likely to ignore the error message.

    " } }, "Metadata": { @@ -2089,18 +2101,24 @@ "base": null, "refs": { "Metadata$key": null, - "MetadataEntry$Name": "

    " + "MetadataEntry$Name": "

    Name of the Object.

    " } }, "MetadataValue": { "base": null, "refs": { "Metadata$value": null, - "MetadataEntry$Value": "

    " + "MetadataEntry$Value": "

    Value of the Object.

    " + } + }, + "Metrics": { + "base": "

    A container specifying replication metrics-related information, including whether emitting metrics and Amazon S3 events for replication are enabled. In addition, contains configurations related to specific metrics or events. Must be specified together with a ReplicationTime block.

    ", + "refs": { + "Destination$Metrics": "

    A container specifying replication metrics-related information, including whether emitting metrics and Amazon S3 events for replication are enabled. In addition, contains configurations related to specific metrics or events. Must be specified together with a ReplicationTime block.

    " } }, "MetricsAndOperator": { - "base": "

    ", + "base": "

    A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

    ", "refs": { "MetricsFilter$And": "

    A conjunction (logical AND) of predicates, which is used in evaluating a metrics filter. The operator must have at least two predicates, and an object must match all of the predicates in order for the filter to apply.

    " } @@ -2120,7 +2138,7 @@ } }, "MetricsFilter": { - "base": "

    ", + "base": "

    Specifies a metrics configuration filter. The metrics configuration only includes objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).

    ", "refs": { "MetricsConfiguration$Filter": "

    Specifies a metrics configuration filter. The metrics configuration will only include objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction (MetricsAndOperator).

    " } @@ -2134,6 +2152,18 @@ "PutBucketMetricsConfigurationRequest$Id": "

    The ID used to identify the metrics configuration.

    " } }, + "MetricsStatus": { + "base": null, + "refs": { + "Metrics$Status": "

    Specifies whether the replication metrics are enabled.

    " + } + }, + "Minutes": { + "base": null, + "refs": { + "ReplicationTimeValue$Minutes": "

    Contains an integer specifying time in minutes.

    " + } + }, "MissingMeta": { "base": null, "refs": { @@ -2142,7 +2172,7 @@ } }, "MultipartUpload": { - "base": "

    ", + "base": "

    Container for the MultipartUpload for the Amazon S3 object.

    ", "refs": { "MultipartUploadList$member": null } @@ -2151,7 +2181,7 @@ "base": null, "refs": { "AbortMultipartUploadRequest$UploadId": "

    Upload ID that identifies the multipart upload.

    ", - "CompleteMultipartUploadRequest$UploadId": "

    ", + "CompleteMultipartUploadRequest$UploadId": "

    ID for the initiated multipart upload.

    ", "CreateMultipartUploadOutput$UploadId": "

    ID for the initiated multipart upload.

    ", "ListPartsOutput$UploadId": "

    Upload ID identifying the multipart upload whose parts are being listed.

    ", "ListPartsRequest$UploadId": "

    Upload ID identifying the multipart upload whose parts are being listed.

    ", @@ -2163,14 +2193,14 @@ "MultipartUploadList": { "base": null, "refs": { - "ListMultipartUploadsOutput$Uploads": "

    " + "ListMultipartUploadsOutput$Uploads": "

    Container for elements related to a particular multipart upload. A response can contain zero or more Upload elements.

    " } }, "NextKeyMarker": { "base": null, "refs": { "ListMultipartUploadsOutput$NextKeyMarker": "

    When a list is truncated, this element specifies the value that should be used for the key-marker request parameter in a subsequent request.

    ", - "ListObjectVersionsOutput$NextKeyMarker": "

    Use this value for the key marker request parameter in a subsequent request.

    " + "ListObjectVersionsOutput$NextKeyMarker": "

    When the number of responses exceeds the value of MaxKeys, NextKeyMarker specifies the first key not returned that satisfies the search criteria. Use this value for the key-marker request parameter in a subsequent request.

    " } }, "NextMarker": { @@ -2203,7 +2233,7 @@ "NextVersionIdMarker": { "base": null, "refs": { - "ListObjectVersionsOutput$NextVersionIdMarker": "

    Use this value for the next version id marker parameter in a subsequent request.

    " + "ListObjectVersionsOutput$NextVersionIdMarker": "

    When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker specifies the first object version not returned that satisfies the search criteria. Use this value for the version-id-marker request parameter in a subsequent request.

    " } }, "NoSuchBucket": { @@ -2224,41 +2254,41 @@ "NoncurrentVersionExpiration": { "base": "

    Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 permanently deletes the noncurrent object versions. You set this lifecycle configuration action on a bucket that has versioning enabled (or suspended) to request that Amazon S3 delete noncurrent object versions at a specific period in the object's lifetime.

    ", "refs": { - "LifecycleRule$NoncurrentVersionExpiration": "

    ", - "Rule$NoncurrentVersionExpiration": "

    " + "LifecycleRule$NoncurrentVersionExpiration": null, + "Rule$NoncurrentVersionExpiration": null } }, "NoncurrentVersionTransition": { "base": "

    Container for the transition rule that describes when noncurrent objects transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's lifetime.

    ", "refs": { "NoncurrentVersionTransitionList$member": null, - "Rule$NoncurrentVersionTransition": "

    " + "Rule$NoncurrentVersionTransition": null } }, "NoncurrentVersionTransitionList": { "base": null, "refs": { - "LifecycleRule$NoncurrentVersionTransitions": "

    " + "LifecycleRule$NoncurrentVersionTransitions": "

    Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.

    " } }, "NotificationConfiguration": { "base": "

    A container for specifying the notification configuration of the bucket. If this element is empty, notifications are turned off for the bucket.

    ", "refs": { - "PutBucketNotificationConfigurationRequest$NotificationConfiguration": "

    " + "PutBucketNotificationConfigurationRequest$NotificationConfiguration": null } }, "NotificationConfigurationDeprecated": { "base": null, "refs": { - "PutBucketNotificationRequest$NotificationConfiguration": "

    " + "PutBucketNotificationRequest$NotificationConfiguration": "

    The container for the configuration.

    " } }, "NotificationConfigurationFilter": { "base": "

    Specifies object key name filtering rules. For information about key name filtering, see Configuring Event Notifications in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { - "LambdaFunctionConfiguration$Filter": "

    ", - "QueueConfiguration$Filter": "

    ", - "TopicConfiguration$Filter": "

    " + "LambdaFunctionConfiguration$Filter": null, + "QueueConfiguration$Filter": null, + "TopicConfiguration$Filter": null } }, "NotificationId": { @@ -2273,7 +2303,7 @@ } }, "Object": { - "base": "

    ", + "base": "

    An object consists of data and its descriptive metadata.

    ", "refs": { "ObjectList$member": null } @@ -2288,13 +2318,13 @@ "refs": { "CopyObjectRequest$ACL": "

    The canned ACL to apply to the object.

    ", "CreateMultipartUploadRequest$ACL": "

    The canned ACL to apply to the object.

    ", - "PutObjectAclRequest$ACL": "

    The canned ACL to apply to the object.

    ", - "PutObjectRequest$ACL": "

    The canned ACL to apply to the object.

    ", + "PutObjectAclRequest$ACL": "

    The canned ACL to apply to the object. For more information, see Canned ACL

    ", + "PutObjectRequest$ACL": "

    The canned ACL to apply to the object. For more information, see Canned ACL.

    ", "S3Location$CannedACL": "

    The canned ACL to apply to the restore results.

    " } }, "ObjectIdentifier": { - "base": "

    ", + "base": "

    Object Identifier is unique value to identify objects.

    ", "refs": { "ObjectIdentifierList$member": null } @@ -2302,73 +2332,73 @@ "ObjectIdentifierList": { "base": null, "refs": { - "Delete$Objects": "

    " + "Delete$Objects": "

    The objects to delete.

    " } }, "ObjectKey": { "base": null, "refs": { "AbortMultipartUploadRequest$Key": "

    Key of the object for which the multipart upload was initiated.

    ", - "CompleteMultipartUploadOutput$Key": "

    ", - "CompleteMultipartUploadRequest$Key": "

    ", - "CopyObjectRequest$Key": "

    ", + "CompleteMultipartUploadOutput$Key": "

    The object key of the newly created object.

    ", + "CompleteMultipartUploadRequest$Key": "

    Object key for which the multipart upload was initiated.

    ", + "CopyObjectRequest$Key": "

    The key of the destination object.

    ", "CreateMultipartUploadOutput$Key": "

    Object key for which the multipart upload was initiated.

    ", - "CreateMultipartUploadRequest$Key": "

    ", + "CreateMultipartUploadRequest$Key": "

    Object key for which the multipart upload is to be initiated.

    ", "DeleteMarkerEntry$Key": "

    The object key.

    ", - "DeleteObjectRequest$Key": "

    ", - "DeleteObjectTaggingRequest$Key": "

    ", - "DeletedObject$Key": "

    ", - "Error$Key": "

    ", + "DeleteObjectRequest$Key": "

    Key name of the object to delete.

    ", + "DeleteObjectTaggingRequest$Key": "

    Name of the tag.

    ", + "DeletedObject$Key": "

    The name of the deleted object.

    ", + "Error$Key": "

    The error key.

    ", "ErrorDocument$Key": "

    The object key name to use when a 4XX class error occurs.

    ", - "GetObjectAclRequest$Key": "

    ", + "GetObjectAclRequest$Key": "

    The key of the object for which to get the ACL information.

    ", "GetObjectLegalHoldRequest$Key": "

    The key name for the object whose Legal Hold status you want to retrieve.

    ", - "GetObjectRequest$Key": "

    ", + "GetObjectRequest$Key": "

    Key of the object to get.

    ", "GetObjectRetentionRequest$Key": "

    The key name for the object whose retention settings you want to retrieve.

    ", - "GetObjectTaggingRequest$Key": "

    ", - "GetObjectTorrentRequest$Key": "

    ", - "HeadObjectRequest$Key": "

    ", + "GetObjectTaggingRequest$Key": "

    Object key for which to get the tagging information.

    ", + "GetObjectTorrentRequest$Key": "

    The object key for which to get the information.

    ", + "HeadObjectRequest$Key": "

    The object key.

    ", "ListPartsOutput$Key": "

    Object key for which the multipart upload was initiated.

    ", - "ListPartsRequest$Key": "

    ", + "ListPartsRequest$Key": "

    Object key for which the multipart upload was initiated.

    ", "MultipartUpload$Key": "

    Key of the object for which the multipart upload was initiated.

    ", - "Object$Key": "

    ", + "Object$Key": "

    The name that you assign to an object. You use the object key to retrieve the object.

    ", "ObjectIdentifier$Key": "

    Key name of the object to delete.

    ", "ObjectVersion$Key": "

    The object key.

    ", - "PutObjectAclRequest$Key": "

    ", + "PutObjectAclRequest$Key": "

    Key for which the PUT operation was initiated.

    ", "PutObjectLegalHoldRequest$Key": "

    The key name for the object that you want to place a Legal Hold on.

    ", "PutObjectRequest$Key": "

    Object key for which the PUT operation was initiated.

    ", "PutObjectRetentionRequest$Key": "

    The key name for the object that you want to apply this Object Retention configuration to.

    ", - "PutObjectTaggingRequest$Key": "

    ", - "RestoreObjectRequest$Key": "

    ", + "PutObjectTaggingRequest$Key": "

    Name of the tag.

    ", + "RestoreObjectRequest$Key": "

    Object key for which the operation was initiated.

    ", "SelectObjectContentRequest$Key": "

    The object key.

    ", "Tag$Key": "

    Name of the tag.

    ", - "UploadPartCopyRequest$Key": "

    ", + "UploadPartCopyRequest$Key": "

    Object key for which the multipart upload was initiated.

    ", "UploadPartRequest$Key": "

    Object key for which the multipart upload was initiated.

    " } }, "ObjectList": { "base": null, "refs": { - "ListObjectsOutput$Contents": "

    ", + "ListObjectsOutput$Contents": "

    Metadata about each object returned.

    ", "ListObjectsV2Output$Contents": "

    Metadata about each object returned.

    " } }, "ObjectLockConfiguration": { - "base": "

    The container element for object lock configuration parameters.

    ", + "base": "

    The container element for Object Lock configuration parameters.

    ", "refs": { - "GetObjectLockConfigurationOutput$ObjectLockConfiguration": "

    The specified bucket's object lock configuration.

    ", - "PutObjectLockConfigurationRequest$ObjectLockConfiguration": "

    The object lock configuration that you want to apply to the specified bucket.

    " + "GetObjectLockConfigurationOutput$ObjectLockConfiguration": "

    The specified bucket's Object Lock configuration.

    ", + "PutObjectLockConfigurationRequest$ObjectLockConfiguration": "

    The Object Lock configuration that you want to apply to the specified bucket.

    " } }, "ObjectLockEnabled": { "base": null, "refs": { - "ObjectLockConfiguration$ObjectLockEnabled": "

    Indicates whether this bucket has an object lock configuration enabled.

    " + "ObjectLockConfiguration$ObjectLockEnabled": "

    Indicates whether this bucket has an Object Lock configuration enabled.

    " } }, "ObjectLockEnabledForBucket": { "base": null, "refs": { - "CreateBucketRequest$ObjectLockEnabledForBucket": "

    Specifies whether you want Amazon S3 object lock to be enabled for the new bucket.

    " + "CreateBucketRequest$ObjectLockEnabledForBucket": "

    Specifies whether you want S3 Object Lock to be enabled for the new bucket.

    " } }, "ObjectLockLegalHold": { @@ -2383,30 +2413,30 @@ "refs": { "CopyObjectRequest$ObjectLockLegalHoldStatus": "

    Specifies whether you want to apply a Legal Hold to the copied object.

    ", "CreateMultipartUploadRequest$ObjectLockLegalHoldStatus": "

    Specifies whether you want to apply a Legal Hold to the uploaded object.

    ", - "GetObjectOutput$ObjectLockLegalHoldStatus": "

    Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.

    ", - "HeadObjectOutput$ObjectLockLegalHoldStatus": "

    The Legal Hold status for the specified object.

    ", + "GetObjectOutput$ObjectLockLegalHoldStatus": "

    Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.

    ", + "HeadObjectOutput$ObjectLockLegalHoldStatus": "

    Specifies whether a legal hold is in effect for this object. This header is only returned if the requester has the s3:GetObjectLegalHold permission. This header is not returned if the specified version of this object has never had a legal hold applied. For more information about S3 Object Lock, see Object Lock.

    ", "ObjectLockLegalHold$Status": "

    Indicates whether the specified object has a Legal Hold in place.

    ", - "PutObjectRequest$ObjectLockLegalHoldStatus": "

    The Legal Hold status that you want to apply to the specified object.

    " + "PutObjectRequest$ObjectLockLegalHoldStatus": "

    Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock.

    " } }, "ObjectLockMode": { "base": null, "refs": { - "CopyObjectRequest$ObjectLockMode": "

    The object lock mode that you want to apply to the copied object.

    ", - "CreateMultipartUploadRequest$ObjectLockMode": "

    Specifies the object lock mode that you want to apply to the uploaded object.

    ", - "GetObjectOutput$ObjectLockMode": "

    The object lock mode currently in place for this object.

    ", - "HeadObjectOutput$ObjectLockMode": "

    The object lock mode currently in place for this object.

    ", - "PutObjectRequest$ObjectLockMode": "

    The object lock mode that you want to apply to this object.

    " + "CopyObjectRequest$ObjectLockMode": "

    The Object Lock mode that you want to apply to the copied object.

    ", + "CreateMultipartUploadRequest$ObjectLockMode": "

    Specifies the Object Lock mode that you want to apply to the uploaded object.

    ", + "GetObjectOutput$ObjectLockMode": "

    The Object Lock mode currently in place for this object.

    ", + "HeadObjectOutput$ObjectLockMode": "

    The Object Lock mode, if any, that's in effect for this object. This header is only returned if the requester has the s3:GetObjectRetention permission. For more information about S3 Object Lock, see Object Lock.

    ", + "PutObjectRequest$ObjectLockMode": "

    The Object Lock mode that you want to apply to this object.

    " } }, "ObjectLockRetainUntilDate": { "base": null, "refs": { - "CopyObjectRequest$ObjectLockRetainUntilDate": "

    The date and time when you want the copied object's object lock to expire.

    ", - "CreateMultipartUploadRequest$ObjectLockRetainUntilDate": "

    Specifies the date and time when you want the object lock to expire.

    ", - "GetObjectOutput$ObjectLockRetainUntilDate": "

    The date and time when this object's object lock will expire.

    ", - "HeadObjectOutput$ObjectLockRetainUntilDate": "

    The date and time when this object's object lock expires.

    ", - "PutObjectRequest$ObjectLockRetainUntilDate": "

    The date and time when you want this object's object lock to expire.

    " + "CopyObjectRequest$ObjectLockRetainUntilDate": "

    The date and time when you want the copied object's Object Lock to expire.

    ", + "CreateMultipartUploadRequest$ObjectLockRetainUntilDate": "

    Specifies the date and time when you want the Object Lock to expire.

    ", + "GetObjectOutput$ObjectLockRetainUntilDate": "

    The date and time when this object's Object Lock will expire.

    ", + "HeadObjectOutput$ObjectLockRetainUntilDate": "

    The date and time when the Object Lock retention period expires. This header is only returned if the requester has the s3:GetObjectRetention permission.

    ", + "PutObjectRequest$ObjectLockRetainUntilDate": "

    The date and time when you want this object's Object Lock to expire.
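
    A sketch of supplying a retain-until date on upload, assuming the generated PutObjectInput field names match the model above, the ObjectLockModeGovernance enum constant follows the SDK's usual codegen naming, and aws.Time is the pointer helper:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        retainUntil := time.Now().AddDate(0, 0, 30) // keep the object locked for 30 days

        _ = s3.PutObjectInput{
            Bucket:                    aws.String("my-bucket"), // placeholder
            Key:                       aws.String("report.csv"),
            ObjectLockMode:            s3.ObjectLockModeGovernance,
            ObjectLockRetainUntilDate: aws.Time(retainUntil),
        }
    }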

    " } }, "ObjectLockRetention": { @@ -2419,21 +2449,21 @@ "ObjectLockRetentionMode": { "base": null, "refs": { - "DefaultRetention$Mode": "

    The default object lock retention mode you want to apply to new objects placed in the specified bucket.

    ", + "DefaultRetention$Mode": "

    The default Object Lock retention mode you want to apply to new objects placed in the specified bucket.

    ", "ObjectLockRetention$Mode": "

    Indicates the Retention mode for the specified object.

    " } }, "ObjectLockRule": { - "base": "

    The container element for an object lock rule.

    ", + "base": "

    The container element for an Object Lock rule.

    ", "refs": { - "ObjectLockConfiguration$Rule": "

    The object lock rule in place for the specified object.

    " + "ObjectLockConfiguration$Rule": "

    The Object Lock rule in place for the specified object.

    " } }, "ObjectLockToken": { "base": null, "refs": { - "PutBucketReplicationRequest$Token": "

    A token that allows Amazon S3 object lock to be enabled for an existing bucket.

    ", - "PutObjectLockConfigurationRequest$Token": "

    A token to allow Amazon S3 object lock to be enabled for an existing bucket.

    " + "PutBucketReplicationRequest$Token": "

    ", + "PutObjectLockConfigurationRequest$Token": "

    A token to allow Object Lock to be enabled for an existing bucket.

    " } }, "ObjectNotInActiveTierError": { @@ -2448,7 +2478,7 @@ } }, "ObjectVersion": { - "base": "

    ", + "base": "

    The version of an object.

    ", "refs": { "ObjectVersionList$member": null } @@ -2456,22 +2486,22 @@ "ObjectVersionId": { "base": null, "refs": { - "CompleteMultipartUploadOutput$VersionId": "

    Version of the object.

    ", + "CompleteMultipartUploadOutput$VersionId": "

    Version ID of the newly created object, in case the bucket has versioning turned on.

    ", "CopyObjectOutput$VersionId": "

    Version ID of the newly created copy.

    ", "DeleteMarkerEntry$VersionId": "

    Version ID of an object.

    ", "DeleteObjectOutput$VersionId": "

    Returns the version ID of the delete marker created as a result of the DELETE operation.

    ", "DeleteObjectRequest$VersionId": "

    VersionId used to reference a specific version of the object.

    ", "DeleteObjectTaggingOutput$VersionId": "

    The versionId of the object the tag-set was removed from.

    ", "DeleteObjectTaggingRequest$VersionId": "

    The versionId of the object that the tag-set will be removed from.

    ", - "DeletedObject$VersionId": "

    ", - "Error$VersionId": "

    ", + "DeletedObject$VersionId": "

    The version ID of the deleted object.

    ", + "Error$VersionId": "

    The version ID of the error.

    ", "GetObjectAclRequest$VersionId": "

    VersionId used to reference a specific version of the object.

    ", "GetObjectLegalHoldRequest$VersionId": "

    The version ID of the object whose Legal Hold status you want to retrieve.

    ", "GetObjectOutput$VersionId": "

    Version of the object.

    ", "GetObjectRequest$VersionId": "

    VersionId used to reference a specific version of the object.

    ", "GetObjectRetentionRequest$VersionId": "

    The version ID for the object whose retention settings you want to retrieve.

    ", - "GetObjectTaggingOutput$VersionId": "

    ", - "GetObjectTaggingRequest$VersionId": "

    ", + "GetObjectTaggingOutput$VersionId": "

    The versionId of the object for which you got the tagging information.

    ", + "GetObjectTaggingRequest$VersionId": "

    The versionId of the object for which to get the tagging information.

    ", "HeadObjectOutput$VersionId": "

    Version of the object.

    ", "HeadObjectRequest$VersionId": "

    VersionId used to reference a specific version of the object.

    ", "ObjectIdentifier$VersionId": "

    VersionId for the specific version of the object to delete.

    ", @@ -2480,15 +2510,15 @@ "PutObjectLegalHoldRequest$VersionId": "

    The version ID of the object that you want to place a Legal Hold on.

    ", "PutObjectOutput$VersionId": "

    Version of the object.

    ", "PutObjectRetentionRequest$VersionId": "

    The version ID for the object that you want to apply this Object Retention configuration to.

    ", - "PutObjectTaggingOutput$VersionId": "

    ", - "PutObjectTaggingRequest$VersionId": "

    ", - "RestoreObjectRequest$VersionId": "

    " + "PutObjectTaggingOutput$VersionId": "

    The versionId of the object the tag-set was added to.

    ", + "PutObjectTaggingRequest$VersionId": "

    The versionId of the object that the tag-set will be added to.

    ", + "RestoreObjectRequest$VersionId": "

    VersionId used to reference a specific version of the object.

    " } }, "ObjectVersionList": { "base": null, "refs": { - "ListObjectVersionsOutput$Versions": "

    " + "ListObjectVersionsOutput$Versions": "

    Container for version information.

    " } }, "ObjectVersionStorageClass": { @@ -2511,17 +2541,17 @@ } }, "Owner": { - "base": "

    ", + "base": "

    Container for the owner's display name and ID

    ", "refs": { "AccessControlPolicy$Owner": "

    Container for the bucket owner's display name and ID.

    ", - "DeleteMarkerEntry$Owner": "

    ", - "GetBucketAclOutput$Owner": "

    ", - "GetObjectAclOutput$Owner": "

    ", - "ListBucketsOutput$Owner": "

    ", - "ListPartsOutput$Owner": "

    ", - "MultipartUpload$Owner": "

    ", - "Object$Owner": "

    ", - "ObjectVersion$Owner": "

    " + "DeleteMarkerEntry$Owner": "

    The account that created the delete marker.

    ", + "GetBucketAclOutput$Owner": "

    Container for the bucket owner's display name and ID.

    ", + "GetObjectAclOutput$Owner": "

    Container for the bucket owner's display name and ID.

    ", + "ListBucketsOutput$Owner": "

    The owner of the buckets listed.

    ", + "ListPartsOutput$Owner": "

    Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name.

    ", + "MultipartUpload$Owner": "

    Specifies the owner of the object that is part of the multipart upload.

    ", + "Object$Owner": "

    The owner of the object

    ", + "ObjectVersion$Owner": "

    Specifies the Owner of the object.

    " } }, "OwnerOverride": { @@ -2531,13 +2561,13 @@ } }, "ParquetInput": { - "base": "

    ", + "base": "

    Container for Parquet.

    ", "refs": { "InputSerialization$Parquet": "

    Specifies Parquet as object's input serialization format.

    " } }, "Part": { - "base": "

    ", + "base": "

    Container for elements related to a part.

    ", "refs": { "Parts$member": null } @@ -2556,14 +2586,14 @@ "PartNumberMarker": { "base": null, "refs": { - "ListPartsOutput$PartNumberMarker": "

    Part number after which listing begins.

    ", + "ListPartsOutput$PartNumberMarker": "

    When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.

    ", "ListPartsRequest$PartNumberMarker": "

    Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.

    " } }, "Parts": { "base": null, "refs": { - "ListPartsOutput$Parts": "

    " + "ListPartsOutput$Parts": "

    Container for elements related to a particular part. A response can contain zero or more Part elements.

    " } }, "PartsCount": { @@ -2605,24 +2635,24 @@ "AnalyticsAndOperator$Prefix": "

    The prefix to use when evaluating an AND predicate: The prefix that an object must have to be included in the metrics results.

    ", "AnalyticsFilter$Prefix": "

    The prefix to use when evaluating an analytics filter.

    ", "AnalyticsS3BucketDestination$Prefix": "

    The prefix to use when exporting data. The prefix is prepended to all results.

    ", - "CommonPrefix$Prefix": "

    ", + "CommonPrefix$Prefix": "

    Container for the specified common prefix.

    ", "InventoryFilter$Prefix": "

    The prefix that an object must have to be included in the inventory results.

    ", "InventoryS3BucketDestination$Prefix": "

    The prefix that is prepended to all inventory results.

    ", "LifecycleRule$Prefix": "

    Prefix identifying one or more objects to which the rule applies. This is No longer used; use Filter instead.

    ", - "LifecycleRuleAndOperator$Prefix": "

    ", + "LifecycleRuleAndOperator$Prefix": "

    Prefix identifying one or more objects to which the rule applies.

    ", "LifecycleRuleFilter$Prefix": "

    Prefix identifying one or more objects to which the rule applies.

    ", "ListMultipartUploadsOutput$Prefix": "

    When a prefix is provided in the request, this field contains the specified prefix. The result contains only keys starting with the specified prefix.

    ", - "ListMultipartUploadsRequest$Prefix": "

    Lists in-progress uploads only for those keys that begin with the specified prefix.

    ", - "ListObjectVersionsOutput$Prefix": "

    ", - "ListObjectVersionsRequest$Prefix": "

    Limits the response to keys that begin with the specified prefix.

    ", - "ListObjectsOutput$Prefix": "

    ", + "ListMultipartUploadsRequest$Prefix": "

    Lists in-progress uploads only for those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.)

    ", + "ListObjectVersionsOutput$Prefix": "

    Selects objects that start with the value supplied by this parameter.

    ", + "ListObjectVersionsRequest$Prefix": "

    Use this parameter to select only those keys that begin with the specified prefix. You can use prefixes to separate a bucket into different groupings of keys. (You can think of using prefix to make groups in the same way you'd use a folder in a file system.) You can use prefix with delimiter to roll up numerous objects into a single result under CommonPrefixes.

    ", + "ListObjectsOutput$Prefix": "

    Keys that begin with the indicated prefix.

    ", "ListObjectsRequest$Prefix": "

    Limits the response to keys that begin with the specified prefix.

    ", - "ListObjectsV2Output$Prefix": "

    Limits the response to keys that begin with the specified prefix.

    ", + "ListObjectsV2Output$Prefix": "

    Keys that begin with the indicated prefix.

    ", "ListObjectsV2Request$Prefix": "

    Limits the response to keys that begin with the specified prefix.

    ", "MetricsAndOperator$Prefix": "

    The prefix used when evaluating an AND predicate.

    ", "MetricsFilter$Prefix": "

    The prefix used when evaluating a metrics filter.

    ", "ReplicationRule$Prefix": "

    An object keyname prefix that identifies the object or objects to which the rule applies. The maximum prefix length is 1,024 characters. To include all objects in a bucket, specify an empty string.

    ", - "ReplicationRuleAndOperator$Prefix": "

    ", + "ReplicationRuleAndOperator$Prefix": "

    An object keyname prefix that identifies the subset of objects to which the rule applies.

    ", "ReplicationRuleFilter$Prefix": "

    An object keyname prefix that identifies the subset of objects to which the rule applies.

    ", "Rule$Prefix": "

    Object key prefix that identifies one or more objects to which this rule applies.

    " } @@ -2630,17 +2660,17 @@ "Priority": { "base": null, "refs": { - "ReplicationRule$Priority": "

    The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

    • Same object quality prefix based filter criteria If prefixes you specified in multiple rules overlap

    • Same object qualify tag based filter criteria specified in multiple rules

    For more information, see Cross-Region Replication (CRR) in the Amazon S3 Developer Guide.

    " + "ReplicationRule$Priority": "

    The priority associated with the rule. If you specify multiple rules in a replication configuration, Amazon S3 prioritizes the rules to prevent conflicts when filtering. If two or more rules identify the same object based on a specified filter, the rule with higher priority takes precedence. For example:

    • Same object qualifies based on prefix-based filter criteria if the prefixes you specified in multiple rules overlap.

    • Same object qualifies based on tag-based filter criteria specified in multiple rules.

    For more information, see Replication in the Amazon S3 Developer Guide.

    " } }, "Progress": { - "base": "

    ", + "base": "

    This data type contains information about progress of an operation.

    ", "refs": { "ProgressEvent$Details": "

    The Progress event details.

    " } }, "ProgressEvent": { - "base": "

    ", + "base": "

    This data type contains information about the progress event of an operation.

    ", "refs": { "SelectObjectContentEventStream$Progress": "

    The Progress Event.

    " } @@ -2653,7 +2683,7 @@ } }, "PublicAccessBlockConfiguration": { - "base": "

    Specifies the Block Public Access configuration for an Amazon S3 bucket.

    ", + "base": "

    The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

    ", "refs": { "GetPublicAccessBlockOutput$PublicAccessBlockConfiguration": "

    The PublicAccessBlock configuration currently in effect for this Amazon S3 bucket.

    ", "PutPublicAccessBlockRequest$PublicAccessBlockConfiguration": "

    The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.

    " @@ -2818,7 +2848,7 @@ "base": null, "refs": { "QueueConfiguration$QueueArn": "

    The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.

    ", - "QueueConfigurationDeprecated$Queue": "

    " + "QueueConfigurationDeprecated$Queue": "

    The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 publishes a message when it detects events of the specified type.

    " } }, "QueueConfiguration": { @@ -2828,9 +2858,9 @@ } }, "QueueConfigurationDeprecated": { - "base": "

    ", + "base": "

    This data type is deprecated. Please use QueueConfiguration for the same purposes. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.

    ", "refs": { - "NotificationConfigurationDeprecated$QueueConfiguration": "

    " + "NotificationConfigurationDeprecated$QueueConfiguration": "

    This data type is deprecated. This data type specifies the configuration for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified events.

    " } }, "QueueConfigurationList": { @@ -2848,21 +2878,21 @@ "QuoteCharacter": { "base": null, "refs": { - "CSVInput$QuoteCharacter": "

    Value used for escaping where the field delimiter is part of the value.

    ", - "CSVOutput$QuoteCharacter": "

    The value used for escaping where the field delimiter is part of the value.

    " + "CSVInput$QuoteCharacter": "

    A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \".

    Type: String

    Default: \"

    Ancestors: CSV

    ", + "CSVOutput$QuoteCharacter": "

    A single character used for escaping when the field delimiter is part of the value. For example, if the value is a, b, Amazon S3 wraps this field value in quotation marks, as follows: \" a , b \".

    " } }, "QuoteEscapeCharacter": { "base": null, "refs": { - "CSVInput$QuoteEscapeCharacter": "

    The single character used for escaping the quote character inside an already escaped value.

    ", - "CSVOutput$QuoteEscapeCharacter": "

    Th single character used for escaping the quote character inside an already escaped value.

    " + "CSVInput$QuoteEscapeCharacter": "

    A single character used for escaping the quotation mark character inside an already escaped value. For example, the value \"\"\" a , b \"\"\" is parsed as \" a , b \".

    ", + "CSVOutput$QuoteEscapeCharacter": "

    The single character used for escaping the quote character inside an already escaped value.

    " } }, "QuoteFields": { "base": null, "refs": { - "CSVOutput$QuoteFields": "

    Indicates whether or not all output fields should be quoted.

    " + "CSVOutput$QuoteFields": "

    Indicates whether to use quotation marks around output fields.

    • ALWAYS: Always use quotation marks for output fields.

    • ASNEEDED: Use quotation marks for output fields when needed.

    " } }, "Range": { @@ -2875,13 +2905,13 @@ "RecordDelimiter": { "base": null, "refs": { - "CSVInput$RecordDelimiter": "

    The value used to separate individual records.

    ", - "CSVOutput$RecordDelimiter": "

    The value used to separate individual records.

    ", + "CSVInput$RecordDelimiter": "

    A single character used to separate individual records in the input. Instead of the default value, you can specify an arbitrary delimiter.

    ", + "CSVOutput$RecordDelimiter": "

    A single character used to separate individual records in the output. Instead of the default value, you can specify an arbitrary delimiter.

    ", "JSONOutput$RecordDelimiter": "

    The value used to separate individual records in the output.

    " } }, "RecordsEvent": { - "base": "

    ", + "base": "

    The container for the records event.

    ", "refs": { "SelectObjectContentEventStream$Records": "

    The Records Event.

    " } @@ -2895,7 +2925,7 @@ "RedirectAllRequestsTo": { "base": "

    Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.

    ", "refs": { - "GetBucketWebsiteOutput$RedirectAllRequestsTo": "

    ", + "GetBucketWebsiteOutput$RedirectAllRequestsTo": "

    Specifies the redirect behavior of all requests to a website endpoint of an Amazon S3 bucket.

    ", "WebsiteConfiguration$RedirectAllRequestsTo": "

    The redirect behavior for every request to this bucket's website endpoint.

    If you specify this property, you can't specify any other property.

    " } }, @@ -2920,8 +2950,8 @@ "ReplicationConfiguration": { "base": "

    A container for replication rules. You can add up to 1,000 rules. The maximum size of a replication configuration is 2 MB.

    ", "refs": { - "GetBucketReplicationOutput$ReplicationConfiguration": "

    ", - "PutBucketReplicationRequest$ReplicationConfiguration": "

    " + "GetBucketReplicationOutput$ReplicationConfiguration": null, + "PutBucketReplicationRequest$ReplicationConfiguration": null } }, "ReplicationRule": { @@ -2931,7 +2961,7 @@ } }, "ReplicationRuleAndOperator": { - "base": "

    ", + "base": "

    A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter.

    For example:

    • If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.

    • If you specify a filter based on multiple tags, wrap the Tag elements in an And tag.

    ", "refs": { "ReplicationRuleFilter$And": "

    A container for specifying rule filters. The filters determine the subset of objects to which the rule applies. This element is required only if you specify more than one filter. For example:

    • If you specify both a Prefix and a Tag filter, wrap these filters in an And tag.

    • If you specify a filter based on multiple tags, wrap the Tag elements in an And tag.

    " } @@ -2939,7 +2969,7 @@ "ReplicationRuleFilter": { "base": "

    A filter that identifies the subset of objects to which the replication rule applies. A Filter must specify exactly one Prefix, Tag, or an And child element.

    ", "refs": { - "ReplicationRule$Filter": "

    " + "ReplicationRule$Filter": null } }, "ReplicationRuleStatus": { @@ -2957,8 +2987,27 @@ "ReplicationStatus": { "base": null, "refs": { - "GetObjectOutput$ReplicationStatus": "

    ", - "HeadObjectOutput$ReplicationStatus": "

    " + "GetObjectOutput$ReplicationStatus": "

    Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule.

    ", + "HeadObjectOutput$ReplicationStatus": "

    Amazon S3 can return this header if your request involves a bucket that is either a source or destination in a replication rule.

    In replication, you have a source bucket on which you configure replication and a destination bucket where Amazon S3 stores object replicas. When you request an object (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will return the x-amz-replication-status header in the response as follows:

    • If requesting an object from the source bucket, Amazon S3 will return the x-amz-replication-status header if the object in your request is eligible for replication.

      For example, suppose that in your replication configuration you specify the object prefix \"TaxDocs\", requesting Amazon S3 to replicate objects with the key prefix \"TaxDocs\". Any object you upload with this key name prefix, for example \"TaxDocs/document1.pdf\", is eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status header with the value PENDING, COMPLETED, or FAILED indicating the object replication status.

    • If requesting an object from the destination bucket, Amazon S3 will return the x-amz-replication-status header with the value REPLICA if the object in your request is a replica that Amazon S3 created.

    For more information, see Replication.
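
    As a rough illustration only, the sketch below reads this header through the Go SDK in this repository, assuming the preview SDK's operation-request/Send(ctx) pattern (s3.New(cfg), HeadObjectRequest); the bucket and key are placeholders and error handling is trimmed.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printReplicationStatus issues a HeadObject call and prints the
// x-amz-replication-status value (PENDING, COMPLETED, or FAILED on a
// source bucket; REPLICA on a destination bucket).
func printReplicationStatus(cfg aws.Config) error {
	svc := s3.New(cfg)
	req := svc.HeadObjectRequest(&s3.HeadObjectInput{
		Bucket: aws.String("examplebucket"),         // placeholder
		Key:    aws.String("TaxDocs/document1.pdf"), // placeholder
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		return err
	}
	fmt.Println("replication status:", resp.ReplicationStatus)
	return nil
}
```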

    " + } + }, + "ReplicationTime": { + "base": "

    A container specifying the time when all objects and operations on objects are replicated. Must be specified together with a Metrics block.

    ", + "refs": { + "Destination$ReplicationTime": "

    A container specifying the time when all objects and operations on objects are replicated. Must be specified together with a Metrics block.

    " + } + }, + "ReplicationTimeStatus": { + "base": null, + "refs": { + "ReplicationTime$Status": "

    Specifies whether the replication time is enabled.

    " + } + }, + "ReplicationTimeValue": { + "base": "

    A container specifying the time value.

    ", + "refs": { + "Metrics$EventThreshold": "

    A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold event.

    ", + "ReplicationTime$Time": "

    A container specifying the time by which replication should complete for all objects and operations on objects.
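
    To show how these new members fit together, here is a hedged sketch of a replication Destination that enables S3 Replication Time Control; the Minutes member and the Enabled constant names are assumptions drawn from the public S3 replication API rather than from this diff.

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// rtcDestination builds a Destination with ReplicationTime and Metrics
// enabled together, each using a 15-minute threshold. Verify the field and
// constant names against the generated service/s3 types for this release.
func rtcDestination() *s3.Destination {
	return &s3.Destination{
		Bucket: aws.String("arn:aws:s3:::destinationbucket"), // placeholder bucket ARN
		ReplicationTime: &s3.ReplicationTime{
			Status: s3.ReplicationTimeStatusEnabled,
			Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)}, // Minutes is assumed
		},
		Metrics: &s3.Metrics{
			Status:         s3.MetricsStatusEnabled, // assumed constant name
			EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
	}
}
```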

    " } }, "RequestCharged": { @@ -3014,13 +3063,13 @@ } }, "RequestPaymentConfiguration": { - "base": "

    ", + "base": "

    Container for Payer.

    ", "refs": { - "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": "

    " + "PutBucketRequestPaymentRequest$RequestPaymentConfiguration": "

    Container for Payer.

    " } }, "RequestProgress": { - "base": "

    ", + "base": "

    Container for specifying whether periodic QueryProgress messages should be sent.

    ", "refs": { "SelectObjectContentRequest$RequestProgress": "

    Specifies if periodic request progress information should be enabled.

    " } @@ -3065,7 +3114,7 @@ "base": null, "refs": { "GetObjectOutput$Restore": "

    Provides information about object restoration operation and expiration time of the restored object copy.

    ", - "HeadObjectOutput$Restore": "

    Provides information about object restoration operation and expiration time of the restored object copy.

    " + "HeadObjectOutput$Restore": "

    If the object is an archived object (an object whose storage class is GLACIER), the response includes this header if either the archive restoration is in progress (see RestoreObject) or an archive copy is already restored.

    If an archive copy is already restored, the header value indicates when Amazon S3 is scheduled to delete the object copy. For example:

    x-amz-restore: ongoing-request=\"false\", expiry-date=\"Fri, 23 Dec 2012 00:00:00 GMT\"

    If the object restoration is in progress, the header returns the value ongoing-request=\"true\".

    For more information about archiving objects, see Transitioning Objects: General Considerations.
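
    For illustration, a small self-contained helper (plain Go, no SDK calls) showing one way the raw header value might be interpreted; the exact strings matched are taken from the example above.

```go
package main

import "strings"

// restoreState classifies an x-amz-restore header value such as
//   ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"
// It only distinguishes an in-progress restore from a completed one;
// callers that need the expiry date should parse it separately.
func restoreState(restore string) string {
	switch {
	case restore == "":
		return "no restore requested"
	case strings.Contains(restore, `ongoing-request="true"`):
		return "restore in progress"
	default:
		return "restored copy available until the expiry-date in the header"
	}
}
```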

    " } }, "RestoreObjectOutput": { @@ -3087,7 +3136,7 @@ "RestoreRequest": { "base": "

    Container for restore job parameters.

    ", "refs": { - "RestoreObjectRequest$RestoreRequest": "

    " + "RestoreObjectRequest$RestoreRequest": null } }, "RestoreRequestType": { @@ -3099,7 +3148,7 @@ "Role": { "base": null, "refs": { - "ReplicationConfiguration$Role": "

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Cross-Region Replication in the Amazon Simple Storage Service Developer Guide.

    " + "ReplicationConfiguration$Role": "

    The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that Amazon S3 assumes when replicating objects. For more information, see How to Set Up Replication in the Amazon Simple Storage Service Developer Guide.

    " } }, "RoutingRule": { @@ -3111,7 +3160,7 @@ "RoutingRules": { "base": null, "refs": { - "GetBucketWebsiteOutput$RoutingRules": "

    ", + "GetBucketWebsiteOutput$RoutingRules": "

    Rules that define when a redirect is applied and the redirect behavior.

    ", "WebsiteConfiguration$RoutingRules": "

    Rules that define when a redirect is applied and the redirect behavior.

    " } }, @@ -3124,14 +3173,14 @@ "Rules": { "base": null, "refs": { - "GetBucketLifecycleOutput$Rules": "

    ", - "LifecycleConfiguration$Rules": "

    " + "GetBucketLifecycleOutput$Rules": "

    Container for a lifecycle rule.

    ", + "LifecycleConfiguration$Rules": "

    Specifies lifecycle configuration rules for an Amazon S3 bucket.

    " } }, "S3KeyFilter": { "base": "

    A container for object key name prefix and suffix filtering rules.

    ", "refs": { - "NotificationConfigurationFilter$Key": "

    " + "NotificationConfigurationFilter$Key": null } }, "S3Location": { @@ -3213,20 +3262,20 @@ "SSEKMSKeyId": { "base": null, "refs": { - "CompleteMultipartUploadOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", - "CopyObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "CompleteMultipartUploadOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", + "CopyObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", "CopyObjectRequest$SSEKMSKeyId": "

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", - "CreateMultipartUploadOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", + "CreateMultipartUploadOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", "CreateMultipartUploadRequest$SSEKMSKeyId": "

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", "Encryption$KMSKeyId": "

    If the encryption type is aws:kms, this optional value specifies the AWS KMS key ID to use for encryption of job results.

    ", - "GetObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", - "HeadObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", - "PutObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", - "PutObjectRequest$SSEKMSKeyId": "

    Specifies the AWS KMS key ID to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. Documentation on configuring any of the officially supported AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version

    ", - "SSEKMS$KeyId": "

    Specifies the ID of the AWS Key Management Service (KMS) master encryption key to use for encrypting Inventory reports.

    ", + "GetObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", + "HeadObjectOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", + "PutObjectOutput$SSEKMSKeyId": "

    If the x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", + "PutObjectRequest$SSEKMSKeyId": "

    If the x-amz-server-side-encryption is present and has the value of aws:kms, this header specifies the ID of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used for the object.

    If the value of x-amz-server-side-encryption is aws:kms, this header specifies the ID of the AWS KMS CMK that will be used for the object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
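
    For illustration, a hedged sketch of a PutObject request that sets both headers through the Go SDK; bucket, key, and CMK ID are placeholders, and the ServerSideEncryptionAwsKms constant name is assumed from the SDK's enum naming convention.

```go
package main

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithSSEKMS uploads an object encrypted under a specific AWS KMS CMK.
// Omitting SSEKMSKeyId while ServerSideEncryption is aws:kms would make
// Amazon S3 fall back to the AWS managed CMK for the account.
func putWithSSEKMS(cfg aws.Config) error {
	svc := s3.New(cfg)
	req := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket:               aws.String("examplebucket"), // placeholder
		Key:                  aws.String("exampleobject"), // placeholder
		Body:                 strings.NewReader("filetoupload"),
		ServerSideEncryption: s3.ServerSideEncryptionAwsKms, // x-amz-server-side-encryption: aws:kms
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder CMK ID
	})
	_, err := req.Send(context.TODO())
	return err
}
```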

    ", + "SSEKMS$KeyId": "

    Specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) to use for encrypting Inventory reports.

    ", "ServerSideEncryptionByDefault$KMSMasterKeyID": "

    KMS master key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.

    ", - "UploadPartCopyOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    ", - "UploadPartOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) master encryption key that was used for the object.

    " + "UploadPartCopyOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    ", + "UploadPartOutput$SSEKMSKeyId": "

    If present, specifies the ID of the AWS Key Management Service (KMS) customer master key (CMK) that was used for the object.

    " } }, "SSES3": { @@ -3238,13 +3287,13 @@ "ScanRange": { "base": null, "refs": { - "SelectObjectContentRequest$ScanRange": "

    Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.

    " + "SelectObjectContentRequest$ScanRange": "

    Specifies the byte range of the object to get the records from. A record is processed when its first byte is contained by the range. This parameter is optional, but when specified, it must not be empty. See RFC 2616, Section 14.35.1 about how to specify the start and end of the range.

    ScanRange may be used in the following ways:

    • <scanrange><start>50</start><end>100</end></scanrange> - process only the records starting between the bytes 50 and 100 (inclusive, counting from zero)

    • <scanrange><start>50</start></scanrange> - process only the records starting after the byte 50

    • <scanrange><end>50</end></scanrange> - process only the records within the last 50 bytes of the file.
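
    The same three forms map onto the new ScanRange member of SelectObjectContentInput. The sketch below only builds and sends the request (the event-stream read loop is omitted), and the enum constant names and CSV serialization fields are assumptions based on the SDK's usual code generation.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// selectByteRange runs an S3 Select query over bytes 50-100 of a CSV object,
// mirroring <scanrange><start>50</start><end>100</end></scanrange>.
// Setting only Start or only End gives the other two forms described above.
func selectByteRange(cfg aws.Config) error {
	svc := s3.New(cfg)
	req := svc.SelectObjectContentRequest(&s3.SelectObjectContentInput{
		Bucket:              aws.String("examplebucket"), // placeholder
		Key:                 aws.String("data.csv"),      // placeholder
		Expression:          aws.String("SELECT * FROM S3Object s"),
		ExpressionType:      s3.ExpressionTypeSql,
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		ScanRange: &s3.ScanRange{
			Start: aws.Int64(50),
			End:   aws.Int64(100),
		},
	})
	_, err := req.Send(context.TODO())
	// Reading the returned Records/Stats/Progress events is not shown here.
	return err
}
```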

    " } }, "SelectObjectContentEventStream": { - "base": "

    ", + "base": "

    The container for selecting objects from a content event stream.

    ", "refs": { - "SelectObjectContentOutput$Payload": "

    " + "SelectObjectContentOutput$Payload": "

    The array of results.

    " } }, "SelectObjectContentOutput": { @@ -3266,15 +3315,15 @@ "ServerSideEncryption": { "base": null, "refs": { - "CompleteMultipartUploadOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "CompleteMultipartUploadOutput$ServerSideEncryption": "

    If you specified server-side encryption either with an Amazon S3-managed encryption key or an AWS KMS customer master key (CMK) in your initiate multipart upload request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.

    ", "CopyObjectOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", "CopyObjectRequest$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", "CreateMultipartUploadOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", "CreateMultipartUploadRequest$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", "Encryption$EncryptionType": "

    The server-side encryption algorithm used when storing job results in Amazon S3 (e.g., AES256, aws:kms).

    ", "GetObjectOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", - "HeadObjectOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", - "PutObjectOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "HeadObjectOutput$ServerSideEncryption": "

    If the object is stored using server-side encryption either with an AWS KMS customer master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with the value of the Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", + "PutObjectOutput$ServerSideEncryption": "

    If you specified server-side encryption either with an AWS KMS customer master key (CMK) or Amazon S3-managed encryption key in your PUT request, the response includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the object.

    ", "PutObjectRequest$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", "ServerSideEncryptionByDefault$SSEAlgorithm": "

    Server-side encryption algorithm to use for the default encryption.

    ", "UploadPartCopyOutput$ServerSideEncryption": "

    The Server-side encryption algorithm used when storing this object in S3 (e.g., AES256, aws:kms).

    ", @@ -3290,8 +3339,8 @@ "ServerSideEncryptionConfiguration": { "base": "

    Specifies the default server-side-encryption configuration.

    ", "refs": { - "GetBucketEncryptionOutput$ServerSideEncryptionConfiguration": "

    ", - "PutBucketEncryptionRequest$ServerSideEncryptionConfiguration": "

    " + "GetBucketEncryptionOutput$ServerSideEncryptionConfiguration": null, + "PutBucketEncryptionRequest$ServerSideEncryptionConfiguration": null } }, "ServerSideEncryptionRule": { @@ -3309,7 +3358,7 @@ "Setting": { "base": null, "refs": { - "PublicAccessBlockConfiguration$BlockPublicAcls": "

    Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:

    • PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.

    • PUT Object calls fail if the request includes a public ACL.

    Enabling this setting doesn't affect existing policies or ACLs.

    ", + "PublicAccessBlockConfiguration$BlockPublicAcls": "

    Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE causes the following behavior:

    • PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.

    • PUT Object calls fail if the request includes a public ACL.

    • PUT Bucket calls fail if the request includes a public ACL.

    Enabling this setting doesn't affect existing policies or ACLs.

    ", "PublicAccessBlockConfiguration$IgnorePublicAcls": "

    Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.

    Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.

    ", "PublicAccessBlockConfiguration$BlockPublicPolicy": "

    Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.

    Enabling this setting doesn't affect existing bucket policies.

    ", "PublicAccessBlockConfiguration$RestrictPublicBuckets": "

    Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE restricts access to this bucket to only AWS services and authorized users within this account if the bucket has a public policy.

    Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
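
    A hedged sketch of applying all four settings at once with the Go SDK follows (placeholder bucket name, preview-SDK request/Send(ctx) pattern assumed); any combination of the four booleans is allowed.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// blockAllPublicAccess turns on every PublicAccessBlock setting for a bucket,
// which is the most restrictive combination.
func blockAllPublicAccess(cfg aws.Config) error {
	svc := s3.New(cfg)
	req := svc.PutPublicAccessBlockRequest(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("examplebucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	_, err := req.Send(context.TODO())
	return err
}
```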

    " @@ -3318,15 +3367,15 @@ "Size": { "base": null, "refs": { - "Object$Size": "

    ", + "Object$Size": "

    Size in bytes of the object.

    ", "ObjectVersion$Size": "

    Size in bytes of the object.

    ", "Part$Size": "

    Size in bytes of the uploaded part data.

    " } }, "SourceSelectionCriteria": { - "base": "

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).

    ", + "base": "

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).

    ", "refs": { - "ReplicationRule$SourceSelectionCriteria": "

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using an AWS KMS-Managed Key (SSE-KMS).

    " + "ReplicationRule$SourceSelectionCriteria": "

    A container that describes additional filters for identifying the source objects that you want to replicate. You can choose to enable or disable the replication of these objects. Currently, Amazon S3 supports only the filter that you can specify for objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).

    " } }, "SseKmsEncryptedObjects": { @@ -3338,30 +3387,30 @@ "SseKmsEncryptedObjectsStatus": { "base": null, "refs": { - "SseKmsEncryptedObjects$Status": "

    Specifies whether Amazon S3 replicates objects created with server-side encryption using an AWS KMS-managed key.

    " + "SseKmsEncryptedObjects$Status": "

    Specifies whether Amazon S3 replicates objects created with server-side encryption using a customer master key (CMK) stored in AWS Key Management Service.

    " } }, "Start": { "base": null, "refs": { - "ScanRange$Start": "

    Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0.

    " + "ScanRange$Start": "

    Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0. If only start is supplied, it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange> means scan from byte 50 until the end of the file.

    " } }, "StartAfter": { "base": null, "refs": { - "ListObjectsV2Output$StartAfter": "

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket

    ", - "ListObjectsV2Request$StartAfter": "

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket

    " + "ListObjectsV2Output$StartAfter": "

    If StartAfter was sent with the request, it is included in the response.

    ", + "ListObjectsV2Request$StartAfter": "

    StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts listing after this specified key. StartAfter can be any key in the bucket.

    " } }, "Stats": { - "base": "

    ", + "base": "

    Container for the stats details.

    ", "refs": { "StatsEvent$Details": "

    The Stats event details.

    " } }, "StatsEvent": { - "base": "

    ", + "base": "

    Container for the Stats Event.

    ", "refs": { "SelectObjectContentEventStream$Stats": "

    The Stats Event.

    " } @@ -3372,11 +3421,11 @@ "CopyObjectRequest$StorageClass": "

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", "CreateMultipartUploadRequest$StorageClass": "

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", "Destination$StorageClass": "

    The storage class to use when replicating objects, such as standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.

    For valid values, see the StorageClass element of the PUT Bucket replication action in the Amazon Simple Storage Service API Reference.

    ", - "GetObjectOutput$StorageClass": "

    ", - "HeadObjectOutput$StorageClass": "

    ", - "ListPartsOutput$StorageClass": "

    The class of storage used to store the object.

    ", + "GetObjectOutput$StorageClass": "

    Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.

    ", + "HeadObjectOutput$StorageClass": "

    Provides storage class information of the object. Amazon S3 returns this header for all objects except for Standard storage class objects.

    For more information, see Storage Classes.

    ", + "ListPartsOutput$StorageClass": "

    Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded object.

    ", "MultipartUpload$StorageClass": "

    The class of storage used to store the object.

    ", - "PutObjectRequest$StorageClass": "

    The type of storage to use for the object. Defaults to 'STANDARD'.

    ", + "PutObjectRequest$StorageClass": "

    If you don't specify, Standard is the default storage class. Amazon S3 supports other storage classes.

    ", "S3Location$StorageClass": "

    The class of storage used to store the restore results.

    " } }, @@ -3387,7 +3436,7 @@ } }, "StorageClassAnalysisDataExport": { - "base": "

    ", + "base": "

    Container for data related to the storage class analysis export for an Amazon S3 bucket.

    ", "refs": { "StorageClassAnalysis$DataExport": "

    Specifies how data related to the storage class analysis for an Amazon S3 bucket should be exported.

    " } @@ -3405,7 +3454,7 @@ } }, "Tag": { - "base": "

    ", + "base": "

    A container for a key-value pair.

    ", "refs": { "AnalyticsFilter$Tag": "

    The tag to use when evaluating an analytics filter.

    ", "LifecycleRuleFilter$Tag": "

    This tag must exist in the object's tag set in order for the rule to apply.

    ", @@ -3424,19 +3473,19 @@ "base": null, "refs": { "AnalyticsAndOperator$Tags": "

    The list of tags to use when evaluating an AND predicate.

    ", - "GetBucketTaggingOutput$TagSet": "

    ", - "GetObjectTaggingOutput$TagSet": "

    ", + "GetBucketTaggingOutput$TagSet": "

    Contains the tag set.

    ", + "GetObjectTaggingOutput$TagSet": "

    Contains the tag set.

    ", "LifecycleRuleAndOperator$Tags": "

    All of these tags must exist in the object's tag set in order for the rule to apply.

    ", "MetricsAndOperator$Tags": "

    The list of tags used when evaluating an AND predicate.

    ", - "ReplicationRuleAndOperator$Tags": "

    ", - "Tagging$TagSet": "

    " + "ReplicationRuleAndOperator$Tags": "

    An array of tags containing key and value pairs.

    ", + "Tagging$TagSet": "

    A collection for a set of tags.

    " } }, "Tagging": { - "base": "

    ", + "base": "

    Container for TagSet elements.

    ", "refs": { - "PutBucketTaggingRequest$Tagging": "

    ", - "PutObjectTaggingRequest$Tagging": "

    ", + "PutBucketTaggingRequest$Tagging": "

    Container for the TagSet and Tag elements.

    ", + "PutObjectTaggingRequest$Tagging": "

    Container for the TagSet and Tag elements.

    ", "S3Location$Tagging": "

    The tag-set that is applied to the restore results.

    " } }, @@ -3461,7 +3510,7 @@ } }, "TargetGrant": { - "base": "

    ", + "base": "

    Container for granting information.

    ", "refs": { "TargetGrants$member": null } @@ -3469,7 +3518,7 @@ "TargetGrants": { "base": null, "refs": { - "LoggingEnabled$TargetGrants": "

    " + "LoggingEnabled$TargetGrants": "

    Container for granting information.

    " } }, "TargetPrefix": { @@ -3488,14 +3537,14 @@ "Token": { "base": null, "refs": { - "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "

    The ContinuationToken that represents where this request began.

    ", + "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "

    The marker that is used as a starting point for this analytics configuration list response. This value is present if it was sent in the request.

    ", "ListBucketAnalyticsConfigurationsRequest$ContinuationToken": "

    The ContinuationToken that represents a placeholder from where this request should begin.

    ", "ListBucketInventoryConfigurationsOutput$ContinuationToken": "

    If sent in the request, the marker that is used as a starting point for this inventory configuration list response.

    ", "ListBucketInventoryConfigurationsRequest$ContinuationToken": "

    The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    ", "ListBucketMetricsConfigurationsOutput$ContinuationToken": "

    The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.

    ", "ListBucketMetricsConfigurationsRequest$ContinuationToken": "

    The marker that is used to continue a metrics configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.

    ", - "ListObjectsV2Output$ContinuationToken": "

    ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key

    ", - "ListObjectsV2Request$ContinuationToken": "

    ContinuationToken indicates Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key

    " + "ListObjectsV2Output$ContinuationToken": "

    If ContinuationToken was sent with the request, it is included in the response.

    ", + "ListObjectsV2Request$ContinuationToken": "

    ContinuationToken indicates to Amazon S3 that the list is being continued on this bucket with a token. ContinuationToken is obfuscated and is not a real key.
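
    These two fields drive the usual pagination loop, sketched below with placeholders and minimal error handling: each truncated response's NextContinuationToken is passed back as ContinuationToken on the next request.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllKeys pages through a bucket with ListObjectsV2 until the listing is
// no longer truncated, printing each key along the way.
func listAllKeys(cfg aws.Config, bucket string) error {
	svc := s3.New(cfg)
	var token *string
	for {
		req := svc.ListObjectsV2Request(&s3.ListObjectsV2Input{
			Bucket:            aws.String(bucket),
			ContinuationToken: token, // nil on the first page
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			return err
		}
		for _, obj := range resp.Contents {
			fmt.Println(*obj.Key)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			return nil
		}
		token = resp.NextContinuationToken
	}
}
```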

    " } }, "TopicArn": { @@ -3512,9 +3561,9 @@ } }, "TopicConfigurationDeprecated": { - "base": "

    ", + "base": "

    A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events. This data type is deprecated. Please use TopicConfiguration instead.

    ", "refs": { - "NotificationConfigurationDeprecated$TopicConfiguration": "

    " + "NotificationConfigurationDeprecated$TopicConfiguration": "

    This data type is deprecated. A container for specifying the configuration for publication of messages to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects specified events.

    " } }, "TopicConfigurationList": { @@ -3526,14 +3575,14 @@ "Transition": { "base": "

    Specifies when an object transitions to a specified storage class.

    ", "refs": { - "Rule$Transition": "

    ", + "Rule$Transition": "

    Specifies when an object transitions to a specified storage class.

    ", "TransitionList$member": null } }, "TransitionList": { "base": null, "refs": { - "LifecycleRule$Transitions": "

    " + "LifecycleRule$Transitions": "

    Specifies when an Amazon S3 object transitions to a specified storage class.

    " } }, "TransitionStorageClass": { @@ -3559,7 +3608,7 @@ "base": null, "refs": { "ListMultipartUploadsOutput$UploadIdMarker": "

    Upload ID after which listing began.

    ", - "ListMultipartUploadsRequest$UploadIdMarker": "

    Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored.

    " + "ListMultipartUploadsRequest$UploadIdMarker": "

    Together with key-marker, specifies the multipart upload after which listing should begin. If key-marker is not specified, the upload-id-marker parameter is ignored. Otherwise, any multipart uploads for a key equal to the key-marker might be included in the list only if they have an upload ID lexicographically greater than the specified upload-id-marker.

    " } }, "UploadPartCopyOutput": { @@ -3597,20 +3646,20 @@ "VersionIdMarker": { "base": null, "refs": { - "ListObjectVersionsOutput$VersionIdMarker": "

    ", + "ListObjectVersionsOutput$VersionIdMarker": "

    Marks the last version of the Key returned in a truncated response.

    ", "ListObjectVersionsRequest$VersionIdMarker": "

    Specifies the object version you want to start listing from.

    " } }, "VersioningConfiguration": { "base": "

    Describes the versioning state of an Amazon S3 bucket. For more information, see PUT Bucket versioning in the Amazon Simple Storage Service API Reference.

    ", "refs": { - "PutBucketVersioningRequest$VersioningConfiguration": "

    " + "PutBucketVersioningRequest$VersioningConfiguration": "

    Container for setting the versioning state.

    " } }, "WebsiteConfiguration": { "base": "

    Specifies website configuration parameters for an Amazon S3 bucket.

    ", "refs": { - "PutBucketWebsiteRequest$WebsiteConfiguration": "

    " + "PutBucketWebsiteRequest$WebsiteConfiguration": "

    Container for the request.

    " } }, "WebsiteRedirectLocation": { @@ -3620,7 +3669,7 @@ "CreateMultipartUploadRequest$WebsiteRedirectLocation": "

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    ", "GetObjectOutput$WebsiteRedirectLocation": "

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    ", "HeadObjectOutput$WebsiteRedirectLocation": "

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    ", - "PutObjectRequest$WebsiteRedirectLocation": "

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata.

    " + "PutObjectRequest$WebsiteRedirectLocation": "

    If the bucket is configured as a website, redirects requests for this object to another object in the same bucket or to an external URL. Amazon S3 stores the value of this header in the object metadata. For information about object metadata, see .

    In the following example, the request header sets the redirect to an object (anotherPage.html) in the same bucket:

    x-amz-website-redirect-location: /anotherPage.html

    In the following example, the request header sets the object redirect to another website:

    x-amz-website-redirect-location: http://www.example.com/

    For more information about website hosting in Amazon S3, see Hosting Websites on Amazon S3 and How to Configure Website Page Redirects.
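
    Through the SDK the same header is just the WebsiteRedirectLocation member on PutObjectInput; a minimal sketch with placeholder names, assuming the preview SDK's request/Send(ctx) pattern:

```go
package main

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithRedirect stores an object whose website requests are redirected to
// another page in the same bucket, i.e. it sends
// x-amz-website-redirect-location: /anotherPage.html.
func putWithRedirect(cfg aws.Config) error {
	svc := s3.New(cfg)
	req := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket:                  aws.String("examplebucket"), // placeholder
		Key:                     aws.String("page.html"),     // placeholder
		Body:                    strings.NewReader("<html></html>"),
		WebsiteRedirectLocation: aws.String("/anotherPage.html"),
	})
	_, err := req.Send(context.TODO())
	return err
}
```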

    " } }, "Years": { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 0732c2fba95..9a43ef6daba 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket.s3.amazonaws.com/" }, "comments": { "input": { @@ -95,19 +98,16 @@ "output": { } }, - "description": "The following example creates a bucket.", - "id": "to-create-a-bucket--1472851826060", - "title": "To create a bucket " + "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", + "id": "to-create-a-bucket-in-a-specific-region-1483399072992", + "title": "To create a bucket in a specific region" }, { "input": { - "Bucket": "examplebucket", - "CreateBucketConfiguration": { - "LocationConstraint": "eu-west-1" - } + "Bucket": "examplebucket" }, "output": { - "Location": "http://examplebucket.s3.amazonaws.com/" + "Location": "/examplebucket" }, "comments": { "input": { @@ -115,9 +115,9 @@ "output": { } }, - "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", - "id": "to-create-a-bucket-in-a-specific-region-1483399072992", - "title": "To create a bucket in a specific region" + "description": "The following example creates a bucket.", + "id": "to-create-a-bucket--1472851826060", + "title": "To create a bucket " } ], "CreateMultipartUpload": [ @@ -334,10 +334,12 @@ "Delete": { "Objects": [ { - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" }, { - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" } ], "Quiet": false @@ -346,14 +348,12 @@ "output": { "Deleted": [ { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" }, { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" } ] }, @@ -363,9 +363,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", - "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", - "title": "To delete multiple objects from a versioned bucket" + "description": "The following example deletes objects from a bucket. The request specifies object versions. 
S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", + "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", + "title": "To delete multiple object versions from a versioned bucket" }, { "input": { @@ -373,12 +373,10 @@ "Delete": { "Objects": [ { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "Key": "objectkey2" } ], "Quiet": false @@ -387,12 +385,14 @@ "output": { "Deleted": [ { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + "Key": "objectkey2" } ] }, @@ -402,9 +402,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", - "title": "To delete multiple object versions from a versioned bucket" + "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", + "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", + "title": "To delete multiple objects from a versioned bucket" } ], "GetBucketCors": [ @@ -989,37 +989,47 @@ "ListMultipartUploads": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "KeyMarker": "nextkeyfrompreviousresponse", + "MaxUploads": "2", + "UploadIdMarker": "valuefrompreviousresponse" }, "output": { + "Bucket": "acl1", + "IsTruncated": true, + "KeyMarker": "", + "MaxUploads": "2", + "NextKeyMarker": "someobjectkey", + "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", + "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", - "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "mohanataws", + "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "display-name", + "DisplayName": "ownder-display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": 
"b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1029,53 +1039,43 @@ "output": { } }, - "description": "The following example lists in-progress multipart uploads on a specific bucket.", - "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", - "title": "To list in-progress multipart uploads on a bucket" + "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", + "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", + "title": "List next set of multipart uploads when previous result is truncated" }, { "input": { - "Bucket": "examplebucket", - "KeyMarker": "nextkeyfrompreviousresponse", - "MaxUploads": "2", - "UploadIdMarker": "valuefrompreviousresponse" + "Bucket": "examplebucket" }, "output": { - "Bucket": "acl1", - "IsTruncated": true, - "KeyMarker": "", - "MaxUploads": "2", - "NextKeyMarker": "someobjectkey", - "NextUploadIdMarker": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--", - "UploadIdMarker": "", "Uploads": [ { "Initiated": "2014-05-01T05:40:58.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "mohanataws", - "ID": "852b113e7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" + "DisplayName": "display-name", + "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "gZ30jIqlUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" + "UploadId": "examplelUa.CInXklLQtSMJITdUnoZ1Y5GACB5UckOtspm5zbDMCkPF_qkfZzMiFZ6dksmcnqxJyIBvQMG9X9Q--" }, { "Initiated": "2014-05-01T05:41:27.000Z", "Initiator": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "Key": "JavaFile", "Owner": { - "DisplayName": "ownder-display-name", + "DisplayName": "display-name", "ID": "examplee7a2f25102679df27bb0ae12b3f85be6f290b936c4393484be31bebcc" }, "StorageClass": "STANDARD", - "UploadId": "b7tZSqIlo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" + "UploadId": "examplelo91lv1iwvWpvCiJWugw2xXLPAD7Z8cJyX9.WiIRgNrdG6Ldsn.9FtS63TCl1Uf5faTB.1U5Ckcbmdw--" } ] }, @@ -1085,9 +1085,9 @@ "output": { } }, - "description": "The following example specifies the upload-id-marker and key-marker from previous truncated response to retrieve next setup of multipart uploads.", - "id": "list-next-set-of-multipart-uploads-when-previous-result-is-truncated-1482428106748", - "title": "List next set of multipart uploads when previous result is truncated" + "description": "The following example lists in-progress multipart uploads on a specific bucket.", + "id": "to-list-in-progress-multipart-uploads-on-a-bucket-1481852775260", + "title": "To list in-progress multipart uploads on a bucket" } ], "ListObjectVersions": [ @@ -1567,14 +1567,13 @@ "PutObject": [ { "input": { - "ACL": "authenticated-read", "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "exampleobject" + "Key": "objectkey" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" }, "comments": { "input": { @@ -1582,19 
+1581,20 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", - "title": "To upload an object and specify canned ACL." + "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-create-an-object-1483147613675", + "title": "To create an object." }, { "input": { - "Body": "HappyFace.jpg", + "Body": "c:\\HappyFace.jpg", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" }, "comments": { "input": { @@ -1602,19 +1602,22 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", + "title": "To upload an object and specify optional tags" }, { "input": { "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "objectkey" + "Key": "exampleobject", + "ServerSideEncryption": "AES256", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" + "ServerSideEncryption": "AES256", + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1622,23 +1625,19 @@ "output": { } }, - "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-create-an-object-1483147613675", - "title": "To create an object." + "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1646,20 +1645,20 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. 
If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { - "Body": "c:\\HappyFace.jpg", + "ACL": "authenticated-read", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "exampleobject" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" + "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr" }, "comments": { "input": { @@ -1667,22 +1666,22 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", - "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", - "title": "To upload an object and specify optional tags" + "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-canned-acl-1483397779571", + "title": "To upload an object and specify canned ACL." }, { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", + "Key": "HappyFace.jpg", "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "StorageClass": "STANDARD_IA" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" }, "comments": { "input": { @@ -1690,22 +1689,23 @@ "output": { } }, - "description": "The following example uploads and object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", + "id": "to-upload-an-object-(specify-optional-headers)", + "title": "To upload an object (specify optional headers)" }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "ServerSideEncryption": "AES256", - "StorageClass": "STANDARD_IA" + "Key": "exampleobject", + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads an object. 
The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.", - "id": "to-upload-an-object-(specify-optional-headers)", - "title": "To upload an object (specify optional headers)" + "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" } ], "PutObjectAcl": [ diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 836675ad0fa..8918827c607 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -1195,6 +1195,7 @@ "members":{ "ContainerHostname":{"shape":"ContainerHostname"}, "Image":{"shape":"Image"}, + "Mode":{"shape":"ContainerMode"}, "ModelDataUrl":{"shape":"Url"}, "Environment":{"shape":"EnvironmentMap"}, "ModelPackageName":{"shape":"ArnOrName"} @@ -1210,6 +1211,13 @@ "max":63, "pattern":"^[a-zA-Z0-9](-*[a-zA-Z0-9])*" }, + "ContainerMode":{ + "type":"string", + "enum":[ + "SingleModel", + "MultiModel" + ] + }, "ContentClassifier":{ "type":"string", "enum":[ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 1f560a01c5b..a5214fb2135 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -16,7 +16,7 @@ "CreateNotebookInstanceLifecycleConfig": "

    Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

    Each lifecycle configuration script has a limit of 16384 characters.

    The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

    View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

    Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

    For information about notebook instance lifecycle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.
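
    A hedged sketch of calling this operation with the SDK's generated SageMaker client (the request/Send pattern used throughout this repository). The config name and script body are made-up placeholders; the base64 encoding of the hook content and the exact generated field names are assumptions to verify against the sagemaker package.

    package main

    import (
        "context"
        "encoding/base64"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        // Resolve credentials and region from the default sources.
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        // Keep lifecycle scripts under the 16384-character limit and the 5-minute runtime cap.
        script := "#!/bin/bash\nset -e\necho 'notebook started' >> /tmp/lifecycle.log\n"

        req := svc.CreateNotebookInstanceLifecycleConfigRequest(&sagemaker.CreateNotebookInstanceLifecycleConfigInput{
            NotebookInstanceLifecycleConfigName: aws.String("example-lifecycle-config"), // placeholder name
            OnStart: []sagemaker.NotebookInstanceLifecycleHook{
                // Hook content is sent base64-encoded (assumption based on the SageMaker API reference).
                {Content: aws.String(base64.StdEncoding.EncodeToString([]byte(script)))},
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreateNotebookInstanceLifecycleConfig failed: %v", err)
        }
        log.Printf("created lifecycle config: %v", resp)
    }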

    ", "CreatePresignedNotebookInstanceUrl": "

    Returns a URL that you can use to connect to the Jupyter server from a notebook instance. In the Amazon SageMaker console, when you choose Open next to a notebook instance, Amazon SageMaker opens a new tab showing the Jupyter server home page from the notebook instance. The console uses this API to get the URL and show the page.

    IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the notebook instance. For example, you can restrict access to this API and to the URL that it returns to a list of IP addresses that you specify. Use the NotIpAddress condition operator and the aws:SourceIp condition context key to specify the list of IP addresses that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.

    The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If you try to use the URL after the 5-minute limit expires, you are directed to the AWS console sign-in page.
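
    A minimal sketch of requesting a presigned Jupyter URL with the generated client in this repository; the notebook instance name and session duration are illustrative placeholders.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        req := svc.CreatePresignedNotebookInstanceUrlRequest(&sagemaker.CreatePresignedNotebookInstanceUrlInput{
            NotebookInstanceName:               aws.String("example-notebook"), // placeholder instance name
            SessionExpirationDurationInSeconds: aws.Int64(1800),                // how long the Jupyter session may last
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreatePresignedNotebookInstanceUrl failed: %v", err)
        }
        // The returned URL itself must be used within 5 minutes.
        if resp.AuthorizedUrl != nil {
            log.Printf("presigned URL: %s", *resp.AuthorizedUrl)
        }
    }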

    ", "CreateTrainingJob": "

    Starts a model training job. After training completes, Amazon SageMaker saves the resulting model artifacts to an Amazon S3 location that you specify.

    If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts in a machine learning service other than Amazon SageMaker, provided that you know how to use them for inferences.

    In the request body, you provide the following:

    • AlgorithmSpecification - Identifies the training algorithm to use.

    • HyperParameters - Specify these algorithm-specific parameters to enable the estimation of model parameters during training. Hyperparameters can be tuned to optimize this learning process. For a list of hyperparameters for each training algorithm provided by Amazon SageMaker, see Algorithms.

    • InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx location where it is stored.

    • OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of model training.

    • ResourceConfig - Identifies the resources, ML compute instances, and ML storage volumes to deploy for model training. In distributed training, you specify more than one instance.

    • EnableManagedSpotTraining - Optimize the cost of training machine learning models by up to 80% by using Amazon EC2 Spot instances. For more information, see Managed Spot Training.

    • RoleARN - The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf during model training. You must grant this role the necessary permissions so that Amazon SageMaker can successfully complete model training.

    • StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds to specify how long you are willing to wait for a managed spot training job to complete.

    For more information about Amazon SageMaker, see How It Works.
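
    To make the request shape above concrete, here is a hedged sketch against this SDK's generated SageMaker client. All names, ARNs, image URIs, and S3 paths are placeholders, enum values are passed as their wire strings rather than the generated constants, and pointer/value member conventions should be checked against the generated types.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        req := svc.CreateTrainingJobRequest(&sagemaker.CreateTrainingJobInput{
            TrainingJobName: aws.String("example-training-job"),
            // Role that Amazon SageMaker assumes on your behalf during training (placeholder ARN).
            RoleArn: aws.String("arn:aws:iam::111122223333:role/ExampleSageMakerRole"),
            AlgorithmSpecification: &sagemaker.AlgorithmSpecification{
                TrainingImage:     aws.String("111122223333.dkr.ecr.us-west-2.amazonaws.com/example-algo:latest"),
                TrainingInputMode: sagemaker.TrainingInputMode("File"),
            },
            InputDataConfig: []sagemaker.Channel{
                {
                    ChannelName: aws.String("train"),
                    DataSource: &sagemaker.DataSource{
                        S3DataSource: &sagemaker.S3DataSource{
                            S3DataType: sagemaker.S3DataType("S3Prefix"),
                            S3Uri:      aws.String("s3://examplebucket/train/"),
                        },
                    },
                },
            },
            OutputDataConfig: &sagemaker.OutputDataConfig{
                S3OutputPath: aws.String("s3://examplebucket/output/"),
            },
            ResourceConfig: &sagemaker.ResourceConfig{
                InstanceType:   sagemaker.TrainingInstanceType("ml.m5.xlarge"),
                InstanceCount:  aws.Int64(1),
                VolumeSizeInGB: aws.Int64(50),
            },
            StoppingCondition: &sagemaker.StoppingCondition{
                MaxRuntimeInSeconds: aws.Int64(3600), // cap training cost with a hard time limit
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreateTrainingJob failed: %v", err)
        }
        log.Printf("training job started: %v", resp)
    }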

    ", - "CreateTransformJob": "

    Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

    To perform batch transformations, you create a transform job and use the data that you have readily available.

    In the request body, you provide the following:

    • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

    • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

    • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

    • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

    • TransformResources - Identifies the ML compute instances for the transform job.

    For more information about how batch transformation works Amazon SageMaker, see How It Works.

    ", + "CreateTransformJob": "

    Starts a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify.

    To perform batch transformations, you create a transform job and use the data that you have readily available.

    In the request body, you provide the following:

    • TransformJobName - Identifies the transform job. The name must be unique within an AWS Region in an AWS account.

    • ModelName - Identifies the model to use. ModelName must be the name of an existing Amazon SageMaker model in the same AWS Region and AWS account. For information on creating a model, see CreateModel.

    • TransformInput - Describes the dataset to be transformed and the Amazon S3 location where it is stored.

    • TransformOutput - Identifies the Amazon S3 location where you want Amazon SageMaker to save the results from the transform job.

    • TransformResources - Identifies the ML compute instances for the transform job.

    For more information about how batch transformation works, see Batch Transform.
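
    As a hedged sketch of the request body above with the generated client: the job, model, and bucket names are placeholders, the model is assumed to already exist in the same account and Region, and enum values are passed as their wire strings rather than the generated constants.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        req := svc.CreateTransformJobRequest(&sagemaker.CreateTransformJobInput{
            TransformJobName: aws.String("example-transform-job"),
            ModelName:        aws.String("example-model"), // must name an existing SageMaker model
            TransformInput: &sagemaker.TransformInput{
                ContentType: aws.String("text/csv"),
                DataSource: &sagemaker.TransformDataSource{
                    S3DataSource: &sagemaker.TransformS3DataSource{
                        S3DataType: sagemaker.S3DataType("S3Prefix"),
                        S3Uri:      aws.String("s3://examplebucket/batch-input/"),
                    },
                },
            },
            TransformOutput: &sagemaker.TransformOutput{
                S3OutputPath: aws.String("s3://examplebucket/batch-output/"),
            },
            TransformResources: &sagemaker.TransformResources{
                InstanceType:  sagemaker.TransformInstanceType("ml.m4.xlarge"),
                InstanceCount: aws.Int64(1),
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreateTransformJob failed: %v", err)
        }
        log.Printf("transform job started: %v", resp)
    }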

    ", "CreateWorkteam": "

    Creates a new work team for labeling your data. A work team is defined by one or more Amazon Cognito user pools. You must first create the user pools before you can create a work team.

    You cannot create more than 25 work teams in an account and region.

    ", "DeleteAlgorithm": "

    Removes the specified algorithm from your account.

    ", "DeleteCodeRepository": "

    Deletes the specified Git repository from your account.

    ", @@ -234,7 +234,7 @@ "BatchStrategy": { "base": null, "refs": { - "CreateTransformJobRequest$BatchStrategy": "

    Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

    To enable the batch strategy, you must set SplitType to Line, RecordIO, or TFRecord.

    To use only one record when making an HTTP invocation request to a container, set BatchStrategy to SingleRecord and SplitType to Line.

    To fit as many records in a mini-batch as can fit within the MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType to Line.

    ", + "CreateTransformJobRequest$BatchStrategy": "

    Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

    To enable the batch strategy, you must set the SplitType property of the TransformInput object to Line, RecordIO, or TFRecord.

    To use only one record when making an HTTP invocation request to a container, set BatchStrategy to SingleRecord and SplitType to Line.

    To fit as many records in a mini-batch as can fit within the MaxPayloadInMB limit, set BatchStrategy to MultiRecord and SplitType to Line.

    ", "DescribeTransformJobResponse$BatchStrategy": "

    Specifies the number of records to include in a mini-batch for an HTTP inference request. A record is a single unit of input data that inference can be made on. For example, a single line in a CSV file is a record.

    To enable the batch strategy, you must set SplitType to Line, RecordIO, or TFRecord.

    ", "TransformJobDefinition$BatchStrategy": "

    A string that determines the number of records included in a single mini-batch.

    SingleRecord means only one record is used per mini-batch. MultiRecord means a mini-batch is set to contain as many records that can fit within the MaxPayloadInMB limit.

    " } @@ -489,6 +489,12 @@ "ModelPackageContainerDefinition$ContainerHostname": "

    The DNS host name for the Docker container.

    " } }, + "ContainerMode": { + "base": null, + "refs": { + "ContainerDefinition$Mode": "

    Specifies whether the container hosts a single model or multiple models.
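
    A short, hedged sketch of setting the new Mode member when creating a model with the generated client. The model name, role ARN, image URI, and S3 prefix are placeholders; the "MultiModel" value is passed as its wire string from the enum added above.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        req := svc.CreateModelRequest(&sagemaker.CreateModelInput{
            ModelName:        aws.String("example-multi-model"),
            ExecutionRoleArn: aws.String("arn:aws:iam::111122223333:role/ExampleSageMakerRole"),
            PrimaryContainer: &sagemaker.ContainerDefinition{
                Image: aws.String("111122223333.dkr.ecr.us-west-2.amazonaws.com/example-serving:latest"),
                // With MultiModel, ModelDataUrl points at an S3 prefix that holds many model artifacts.
                ModelDataUrl: aws.String("s3://examplebucket/models/"),
                Mode:         sagemaker.ContainerMode("MultiModel"),
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreateModel failed: %v", err)
        }
        log.Printf("model created: %v", resp)
    }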

    " + } + }, "ContentClassifier": { "base": null, "refs": { @@ -1224,7 +1230,7 @@ } }, "Filter": { - "base": "

    A conditional statement for a search expression that includes a Boolean operator, a resource property, and a value.

    If you don't specify an Operator and a Value, the filter searches for only the specified property. For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.

    If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator as the default.

    In search, there are several property types:

    Metrics

    To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

    {

    \"Name\": \"Metrics.accuracy\",

    \"Operator\": \"GREATER_THAN\",

    \"Value\": \"0.9\"

    }

    HyperParameters

    To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

    {

    \"Name\": \"HyperParameters.learning_rate\",

    \"Operator\": \"LESS_THAN\",

    \"Value\": \"0.5\"

    }

    Tags

    To define a tag filter, enter a value with the form \"Tags.<key>\".

    ", + "base": "

    A conditional statement for a search expression that includes a resource property, a Boolean operator, and a value.

    If you don't specify an Operator and a Value, the filter searches for only the specified property. For example, defining a Filter for the FailureReason for the TrainingJob Resource searches for training job objects that have a value in the FailureReason field.

    If you specify a Value, but not an Operator, Amazon SageMaker uses the equals operator as the default.

    In search, there are several property types:

    Metrics

    To define a metric filter, enter a value using the form \"Metrics.<name>\", where <name> is a metric name. For example, the following filter searches for training jobs with an \"accuracy\" metric greater than \"0.9\":

    {

    \"Name\": \"Metrics.accuracy\",

    \"Operator\": \"GREATER_THAN\",

    \"Value\": \"0.9\"

    }

    HyperParameters

    To define a hyperparameter filter, enter a value with the form \"HyperParameters.<name>\". Decimal hyperparameter values are treated as a decimal in a comparison if the specified Value is also a decimal value. If the specified Value is an integer, the decimal hyperparameter values are treated as integers. For example, the following filter is satisfied by training jobs with a \"learning_rate\" hyperparameter that is less than \"0.5\":

    {

    \"Name\": \"HyperParameters.learning_rate\",

    \"Operator\": \"LESS_THAN\",

    \"Value\": \"0.5\"

    }

    Tags

    To define a tag filter, enter a value with the form \"Tags.<key>\".
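
    The metric filter above, expressed as a hedged sketch against the generated Search API; the resource type and operator are passed as their wire strings, and none of this has been verified against the generated code.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        // Find training jobs whose "accuracy" metric exceeds 0.9, mirroring the JSON filter above.
        req := svc.SearchRequest(&sagemaker.SearchInput{
            Resource: sagemaker.ResourceType("TrainingJob"),
            SearchExpression: &sagemaker.SearchExpression{
                Filters: []sagemaker.Filter{
                    {
                        Name:     aws.String("Metrics.accuracy"),
                        Operator: sagemaker.Operator("GreaterThan"),
                        Value:    aws.String("0.9"),
                    },
                },
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("Search failed: %v", err)
        }
        log.Printf("search results: %v", resp)
    }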

    ", "refs": { "FilterList$member": null } @@ -1313,8 +1319,8 @@ "HyperParameterScalingType": { "base": null, "refs": { - "ContinuousParameterRange$ScalingType": "

    The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

    Auto

    Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have only values greater than 0.

    ReverseLogarithmic

    Hyperparemeter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

    Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

    ", - "IntegerParameterRange$ScalingType": "

    The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

    Auto

    Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparemeter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have only values greater than 0.

    " + "ContinuousParameterRange$ScalingType": "

    The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

    Auto

    Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have only values greater than 0.

    ReverseLogarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a reverse logarithmic scale.

    Reverse logarithmic scaling works only for ranges that are entirely within the range 0<=x<1.0.

    ", + "IntegerParameterRange$ScalingType": "

    The scale that hyperparameter tuning uses to search the hyperparameter range. For information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:

    Auto

    Amazon SageMaker hyperparameter tuning chooses the best scale for the hyperparameter.

    Linear

    Hyperparameter tuning searches the values in the hyperparameter range by using a linear scale.

    Logarithmic

    Hyperparameter tuning searches the values in the hyperparameter range by using a logarithmic scale.

    Logarithmic scaling works only for ranges that have only values greater than 0.

    " } }, "HyperParameterSpecification": { @@ -1549,7 +1555,7 @@ "JoinSource": { "base": null, "refs": { - "DataProcessing$JoinSource": "

    Specifies the source of the data to join with the transformed data. The valid values are None and Input The default value is None which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input.

    For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, and the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

    For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.

    " + "DataProcessing$JoinSource": "

    Specifies the source of the data to join with the transformed data. The valid values are None and Input. The default value is None, which specifies not to join the input with the transformed data. If you want the batch transform job to join the original input data with the transformed data, set JoinSource to Input.

    For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds the transformed data to the input JSON object in an attribute called SageMakerOutput. The joined result for JSON must be a key-value pair object. If the input is not a key-value pair object, Amazon SageMaker creates a new JSON file. In the new JSON file, the input data is stored under the SageMakerInput key and the results are stored in SageMakerOutput.

    For CSV files, Amazon SageMaker combines the transformed data with the input data at the end of the input data and stores it in the output file. The joined data has the joined input data followed by the transformed data and the output is a CSV file.
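
    A small illustrative fragment showing how these JoinSource semantics map onto the generated DataProcessing type; the JSONPath filters are placeholders, the "Input" value is the wire string, and the snippet only constructs the value.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        // Join each transformed record back onto its original input record.
        // InputFilter/OutputFilter are optional JSONPath expressions; "$" keeps everything.
        dp := sagemaker.DataProcessing{
            JoinSource:   sagemaker.JoinSource("Input"),
            InputFilter:  aws.String("$"),
            OutputFilter: aws.String("$"),
        }
        // This value would be set as the DataProcessing field of a CreateTransformJobInput
        // before calling CreateTransformJobRequest.
        fmt.Printf("%+v\n", dp)
    }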

    " } }, "JsonPath": { @@ -1562,15 +1568,15 @@ "KmsKeyId": { "base": null, "refs": { - "CreateEndpointConfigInput$KmsKeyId": "

    The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

    Nitro-based instances do not support encryption with AWS KMS. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances, the call to CreateEndpointConfig fails.

    For a list of nitro-based instances, see Nitro-based Instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

    For more information about storage volumes on nitro-based instances, see Amazon EBS and NVMe on Linux Instances.

    ", + "CreateEndpointConfigInput$KmsKeyId": "

    The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.

    Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a KmsKeyId when using an instance type with local storage. If any of the models that you specify in the ProductionVariants parameter use nitro-based instances with local storage, do not specify a value for the KmsKeyId parameter. If you specify a value for KmsKeyId when using any nitro-based instances with local storage, the call to CreateEndpointConfig fails.

    For a list of instance types that support local instance storage, see Instance Store Volumes.

    For more information about local instance storage encryption, see SSD Instance Store Volumes.
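
    A hedged sketch of passing a customer-managed key when creating an endpoint config with the generated client; the key ARN, names, and instance type are placeholders, and the enum value is passed as its wire string.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/external"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        cfg, err := external.LoadDefaultAWSConfig()
        if err != nil {
            log.Fatalf("failed to load config: %v", err)
        }
        svc := sagemaker.New(cfg)

        req := svc.CreateEndpointConfigRequest(&sagemaker.CreateEndpointConfigInput{
            EndpointConfigName: aws.String("example-endpoint-config"),
            // Customer-managed key for the endpoint's ML storage volume (placeholder ARN).
            // Omit KmsKeyId entirely when the variants use Nitro instance types with local
            // NVMe storage, per the restriction described above.
            KmsKeyId: aws.String("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
            ProductionVariants: []sagemaker.ProductionVariant{
                {
                    VariantName:          aws.String("AllTraffic"),
                    ModelName:            aws.String("example-model"),
                    InstanceType:         sagemaker.ProductionVariantInstanceType("ml.m5.large"),
                    InitialInstanceCount: aws.Int64(1),
                },
            },
        })
        resp, err := req.Send(context.Background())
        if err != nil {
            log.Fatalf("CreateEndpointConfig failed: %v", err)
        }
        log.Printf("endpoint config created: %v", resp)
    }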

    ", "CreateNotebookInstanceInput$KmsKeyId": "

    The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see Enabling and Disabling Keys in the AWS Key Management Service Developer Guide.

    ", "DescribeEndpointConfigOutput$KmsKeyId": "

    AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

    ", "DescribeNotebookInstanceOutput$KmsKeyId": "

    The AWS KMS key ID Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

    ", "LabelingJobOutputConfig$KmsKeyId": "

    The AWS Key Management Service ID of the key used to encrypt the output data, if any.

    If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for LabelingJobOutputConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

    The KMS key policy must grant permission to the IAM role that you specify in your CreateLabelingJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

    ", "LabelingJobResourceConfig$VolumeKmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    ", "OutputDataConfig$KmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // KMS Key Alias

      \"alias/ExampleAlias\"

    • // Amazon Resource Name (ARN) of a KMS Key Alias

      \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"

    If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side encryption with KMS-managed keys for OutputDataConfig. If you use a bucket policy with an s3:PutObject permission that only allows objects with server-side encryption, set the condition key of s3:x-amz-server-side-encryption to \"aws:kms\". For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

    The KMS key policy must grant permission to the IAM role that you specify in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob requests. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

    ", - "ResourceConfig$VolumeKmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job. The VolumeKmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    ", - "TransformOutput$KmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // KMS Key Alias

      \"alias/ExampleAlias\"

    • // Amazon Resource Name (ARN) of a KMS Key Alias

      \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"

    If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

    The KMS key policy must grant permission to the IAM role that you specify in your CreateTramsformJob request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

    ", + "ResourceConfig$VolumeKmsKeyId": "

    The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.

    Certain Nitro-based instances include local storage, dependent on the instance type. Local storage volumes are encrypted using a hardware module on the instance. You can't request a VolumeKmsKeyId when using an instance type with local storage.

    For a list of instance types that support local instance storage, see Instance Store Volumes.

    For more information about local instance storage encryption, see SSD Instance Store Volumes.

    The VolumeKmsKeyId can be in any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    ", + "TransformOutput$KmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The KmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // KMS Key Alias

      \"alias/ExampleAlias\"

    • // Amazon Resource Name (ARN) of a KMS Key Alias

      \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"

    If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. For more information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer Guide.

    The KMS key policy must grant permission to the IAM role that you specify in your CreateModel request. For more information, see Using Key Policies in AWS KMS in the AWS Key Management Service Developer Guide.

    ", "TransformResources$VolumeKmsKeyId": "

    The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the batch transform job. The VolumeKmsKeyId can be any of the following formats:

    • // KMS Key ID

      \"1234abcd-12ab-34cd-56ef-1234567890ab\"

    • // Amazon Resource Name (ARN) of a KMS Key

      \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"

    " } }, @@ -1709,7 +1715,7 @@ } }, "LabelingJobStoppingConditions": { - "base": "

    A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.

    ", + "base": "

    A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.

    Labeling jobs fail after 30 days with an appropriate client error message.

    ", "refs": { "CreateLabelingJobRequest$StoppingConditions": "

    A set of conditions for stopping the labeling job. If any of the conditions are met, the job is automatically stopped. You can use these conditions to control the cost of data labeling.

    ", "DescribeLabelingJobResponse$StoppingConditions": "

    A set of conditions for stopping a labeling job. If any of the conditions are met, the job is automatically stopped.

    " @@ -2512,7 +2518,7 @@ "Operator": { "base": null, "refs": { - "Filter$Operator": "

    A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

    Equals

    The specified resource in Name equals the specified Value.

    NotEquals

    The specified resource in Name does not equal the specified Value.

    GreaterThan

    The specified resource in Name is greater than the specified Value. Not supported for text-based properties.

    GreaterThanOrEqualTo

    The specified resource in Name is greater than or equal to the specified Value. Not supported for text-based properties.

    LessThan

    The specified resource in Name is less than the specified Value. Not supported for text-based properties.

    LessThanOrEqualTo

    The specified resource in Name is less than or equal to the specified Value. Not supported for text-based properties.

    Contains

    Only supported for text-based properties. The word-list of the property contains the specified Value.

    If you have specified a filter Value, the default is Equals.

    " + "Filter$Operator": "

    A Boolean binary operator that is used to evaluate the filter. The operator field contains one of the following values:

    Equals

    The specified resource in Name equals the specified Value.

    NotEquals

    The specified resource in Name does not equal the specified Value.

    GreaterThan

    The specified resource in Name is greater than the specified Value. Not supported for text-based properties.

    GreaterThanOrEqualTo

    The specified resource in Name is greater than or equal to the specified Value. Not supported for text-based properties.

    LessThan

    The specified resource in Name is less than the specified Value. Not supported for text-based properties.

    LessThanOrEqualTo

    The specified resource in Name is less than or equal to the specified Value. Not supported for text-based properties.

    Contains

    Only supported for text-based properties. The word-list of the property contains the specified Value. A SearchExpression can include only one Contains operator.

    If you have specified a filter Value, the default is Equals.

    " } }, "OrderKey": { @@ -2869,14 +2875,14 @@ "ModelArtifacts$S3ModelArtifacts": "

    The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

    ", "OutputConfig$S3OutputLocation": "

    Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

    ", "OutputDataConfig$S3OutputPath": "

    Identifies the S3 path where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.

    ", - "S3DataSource$S3Uri": "

    Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

    • A key name prefix might look like this: s3://bucketname/exampleprefix.

    • A manifest might look like this: s3://bucketname/example.manifest

      The manifest is an S3 object which is a JSON file with the following format:

      [

      {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

      \"relative/path/to/custdata-1\",

      \"relative/path/custdata-2\",

      ...

      ]

      The preceding JSON matches the following s3Uris:

      s3://customer_bucket/some/prefix/relative/path/to/custdata-1

      s3://customer_bucket/some/prefix/relative/path/custdata-2

      ...

      The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

    ", + "S3DataSource$S3Uri": "

    Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

    • A key name prefix might look like this: s3://bucketname/exampleprefix.

    • A manifest might look like this: s3://bucketname/example.manifest

      The manifest is an S3 object which is a JSON file with the following format:


      [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

      \"relative/path/to/custdata-1\",

      \"relative/path/custdata-2\",

      ...

      \"relative/path/custdata-N\"

      ]

      The preceding JSON matches the following s3Uris:

      s3://customer_bucket/some/prefix/relative/path/to/custdata-1

      s3://customer_bucket/some/prefix/relative/path/custdata-2

      ...

      s3://customer_bucket/some/prefix/relative/path/custdata-N

      The complete set of s3uris in this manifest is the input data for the channel for this datasource. The object that each s3uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

    ", "TransformOutput$S3OutputPath": "

    The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix.

    For every S3 object used as input for the transform job, batch transform stores the transformed data with an .out suffix in a corresponding subfolder in the location in the output prefix. For example, for the input data stored at s3://bucket-name/input-name-prefix/dataset01/data.csv, batch transform stores the transformed data at s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out. Batch transform doesn't upload partially processed objects. For an input S3 object that contains multiple records, it creates an .out file only if the transform job succeeds on the entire file. When the input contains multiple S3 objects, the batch transform job processes the listed S3 objects and uploads only the output for successfully processed objects. If any object fails in the transform job, batch transform marks the job as failed to prompt investigation.

    ", - "TransformS3DataSource$S3Uri": "

    Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

    • A key name prefix might look like this: s3://bucketname/exampleprefix.

    • A manifest might look like this: s3://bucketname/example.manifest

      The manifest is an S3 object which is a JSON file with the following format:

      [

      {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

      \"relative/path/to/custdata-1\",

      \"relative/path/custdata-2\",

      ...

      ]

      The preceding JSON matches the following S3Uris:

      s3://customer_bucket/some/prefix/relative/path/to/custdata-1

      s3://customer_bucket/some/prefix/relative/path/custdata-1

      ...

      The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

    ", + "TransformS3DataSource$S3Uri": "

    Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

    • A key name prefix might look like this: s3://bucketname/exampleprefix.

    • A manifest might look like this: s3://bucketname/example.manifest

      The manifest is an S3 object which is a JSON file with the following format:

      [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},

      \"relative/path/to/custdata-1\",

      \"relative/path/custdata-2\",

      ...

      \"relative/path/custdata-N\"

      ]

      The preceding JSON matches the following s3Uris:

      s3://customer_bucket/some/prefix/relative/path/to/custdata-1

      s3://customer_bucket/some/prefix/relative/path/custdata-2

      ...

      s3://customer_bucket/some/prefix/relative/path/custdata-N

      The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf.

    ", "UiConfig$UiTemplateS3Uri": "

    The Amazon S3 bucket location of the UI template. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.

    " } }, "SearchExpression": { - "base": "

    A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression can contain up to twenty elements.

    A SearchExpression contains the following components:

    • A list of Filter objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value.

    • A list of NestedFilter objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.

    • A list of SearchExpression objects. A search expression object can be nested in a list of search expression objects.

    • A Boolean operator: And or Or.

    ", + "base": "

    A multi-expression that searches for the specified resource or resources in a search. All resource objects that satisfy the expression's condition are included in the search results. You must specify at least one subexpression, filter, or nested filter. A SearchExpression can contain up to twenty elements.

    A SearchExpression contains the following components:

    • A list of Filter objects. Each filter defines a simple Boolean expression comprised of a resource property name, Boolean operator, and value. A SearchExpression can include only one Contains operator.

    • A list of NestedFilter objects. Each nested filter defines a list of Boolean expressions using a list of resource properties. A nested filter is satisfied if a single object in the list satisfies all Boolean expressions.

    • A list of SearchExpression objects. A search expression object can be nested in a list of search expression objects.

    • A Boolean operator: And or Or.

    ", "refs": { "SearchExpressionList$member": null, "SearchRequest$SearchExpression": "

    A Boolean conditional statement. Resource objects must satisfy this condition to be included in search results. You must provide at least one subexpression, filter, or nested filter. The maximum number of recursive SubExpressions, NestedFilters, and Filters that can be included in a SearchExpression object is 50.

    " @@ -3021,7 +3027,7 @@ "SplitType": { "base": null, "refs": { - "TransformInput$SplitType": "

    The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None, which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.

    When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord, Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord, Amazon SageMaker sends individual records in each request.

    Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord. Padding is not removed if the value of BatchStrategy is set to MultiRecord.

    For more information about the RecordIO, see Data Format in the MXNet documentation. For more information about the TFRecord, see Consuming TFRecord data in the TensorFlow documentation.

    " + "TransformInput$SplitType": "

    The method to use to split the transform job's data files into smaller batches. Splitting is necessary when the total size of each object is too large to fit in a single request. You can also use data splitting to improve performance by processing multiple concurrent mini-batches. The default value for SplitType is None, which indicates that input data files are not split, and request payloads contain the entire contents of an input object. Set the value of this parameter to Line to split records on a newline character boundary. SplitType also supports a number of record-oriented binary data formats.

    When splitting is enabled, the size of a mini-batch depends on the values of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy is MultiRecord, Amazon SageMaker sends the maximum number of records in each request, up to the MaxPayloadInMB limit. If the value of BatchStrategy is SingleRecord, Amazon SageMaker sends individual records in each request.

    Some data formats represent a record as a binary payload wrapped with extra padding bytes. When splitting is applied to a binary data format, padding is removed if the value of BatchStrategy is set to SingleRecord. Padding is not removed if the value of BatchStrategy is set to MultiRecord.

    For more information about RecordIO, see Create a Dataset Using RecordIO in the MXNet documentation. For more information about TFRecord, see Consuming TFRecord data in the TensorFlow documentation.
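
    A short sketch of how SplitType and BatchStrategy combine on a transform job input with this SDK; enum values are passed as their wire strings, the S3 path is a placeholder, and the snippet only constructs the input.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/sagemaker"
    )

    func main() {
        // Split a CSV input on newlines and pack as many records as fit under
        // MaxPayloadInMB into each request, per the rules described above.
        in := sagemaker.CreateTransformJobInput{
            BatchStrategy:  sagemaker.BatchStrategy("MultiRecord"),
            MaxPayloadInMB: aws.Int64(6),
            TransformInput: &sagemaker.TransformInput{
                ContentType: aws.String("text/csv"),
                SplitType:   sagemaker.SplitType("Line"),
                DataSource: &sagemaker.TransformDataSource{
                    S3DataSource: &sagemaker.TransformS3DataSource{
                        S3DataType: sagemaker.S3DataType("S3Prefix"),
                        S3Uri:      aws.String("s3://examplebucket/batch-input/"),
                    },
                },
            },
        }
        // The remaining required members (job name, model, output, resources) would be
        // filled in before passing this input to CreateTransformJobRequest.
        fmt.Printf("%+v\n", in)
    }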

    " } }, "StartNotebookInstanceInput": { @@ -3690,7 +3696,7 @@ "VolumeSizeInGB": { "base": null, "refs": { - "ResourceConfig$VolumeSizeInGB": "

    The size of the ML storage volume that you want to provision.

    ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

    You must specify sufficient ML storage for your scenario.

    Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.

    " + "ResourceConfig$VolumeSizeInGB": "

    The size of the ML storage volume that you want to provision.

    ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

    You must specify sufficient ML storage for your scenario.

    Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.

    Certain Nitro-based instances include local storage with a fixed total size, dependent on the instance type. When using these instances for training, Amazon SageMaker mounts the local instance storage instead of Amazon EBS gp2 storage. You can't request a VolumeSizeInGB greater than the total size of the local instance storage.

    For a list of instance types that support local instance storage, including the total size per instance type, see Instance Store Volumes.

    " } }, "VpcConfig": { diff --git a/models/apis/sesv2/2019-09-27/api-2.json b/models/apis/sesv2/2019-09-27/api-2.json new file mode 100644 index 00000000000..efca020db69 --- /dev/null +++ b/models/apis/sesv2/2019-09-27/api-2.json @@ -0,0 +1,2093 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2019-09-27", + "endpointPrefix":"email", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceAbbreviation":"Amazon SES V2", + "serviceFullName":"Amazon Simple Email Service", + "serviceId":"SESv2", + "signatureVersion":"v4", + "signingName":"ses", + "uid":"sesv2-2019-09-27" + }, + "operations":{ + "CreateConfigurationSet":{ + "name":"CreateConfigurationSet", + "http":{ + "method":"POST", + "requestUri":"/v2/email/configuration-sets" + }, + "input":{"shape":"CreateConfigurationSetRequest"}, + "output":{"shape":"CreateConfigurationSetResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "CreateConfigurationSetEventDestination":{ + "name":"CreateConfigurationSetEventDestination", + "http":{ + "method":"POST", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations" + }, + "input":{"shape":"CreateConfigurationSetEventDestinationRequest"}, + "output":{"shape":"CreateConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "CreateDedicatedIpPool":{ + "name":"CreateDedicatedIpPool", + "http":{ + "method":"POST", + "requestUri":"/v2/email/dedicated-ip-pools" + }, + "input":{"shape":"CreateDedicatedIpPoolRequest"}, + "output":{"shape":"CreateDedicatedIpPoolResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "CreateDeliverabilityTestReport":{ + "name":"CreateDeliverabilityTestReport", + "http":{ + "method":"POST", + "requestUri":"/v2/email/deliverability-dashboard/test" + }, + "input":{"shape":"CreateDeliverabilityTestReportRequest"}, + "output":{"shape":"CreateDeliverabilityTestReportResponse"}, + "errors":[ + {"shape":"AccountSuspendedException"}, + {"shape":"SendingPausedException"}, + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "CreateEmailIdentity":{ + "name":"CreateEmailIdentity", + "http":{ + "method":"POST", + "requestUri":"/v2/email/identities" + }, + "input":{"shape":"CreateEmailIdentityRequest"}, + "output":{"shape":"CreateEmailIdentityResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "DeleteConfigurationSet":{ + "name":"DeleteConfigurationSet", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}" + }, + "input":{"shape":"DeleteConfigurationSetRequest"}, + "output":{"shape":"DeleteConfigurationSetResponse"}, + "errors":[ + {"shape":"NotFoundException"}, 
+ {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "DeleteConfigurationSetEventDestination":{ + "name":"DeleteConfigurationSetEventDestination", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}" + }, + "input":{"shape":"DeleteConfigurationSetEventDestinationRequest"}, + "output":{"shape":"DeleteConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "DeleteDedicatedIpPool":{ + "name":"DeleteDedicatedIpPool", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/dedicated-ip-pools/{PoolName}" + }, + "input":{"shape":"DeleteDedicatedIpPoolRequest"}, + "output":{"shape":"DeleteDedicatedIpPoolResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "DeleteEmailIdentity":{ + "name":"DeleteEmailIdentity", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/identities/{EmailIdentity}" + }, + "input":{"shape":"DeleteEmailIdentityRequest"}, + "output":{"shape":"DeleteEmailIdentityResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"} + ] + }, + "GetAccount":{ + "name":"GetAccount", + "http":{ + "method":"GET", + "requestUri":"/v2/email/account" + }, + "input":{"shape":"GetAccountRequest"}, + "output":{"shape":"GetAccountResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "GetBlacklistReports":{ + "name":"GetBlacklistReports", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/blacklist-report" + }, + "input":{"shape":"GetBlacklistReportsRequest"}, + "output":{"shape":"GetBlacklistReportsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "GetConfigurationSet":{ + "name":"GetConfigurationSet", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}" + }, + "input":{"shape":"GetConfigurationSetRequest"}, + "output":{"shape":"GetConfigurationSetResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "GetConfigurationSetEventDestinations":{ + "name":"GetConfigurationSetEventDestinations", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations" + }, + "input":{"shape":"GetConfigurationSetEventDestinationsRequest"}, + "output":{"shape":"GetConfigurationSetEventDestinationsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "GetDedicatedIp":{ + "name":"GetDedicatedIp", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ips/{IP}" + }, + "input":{"shape":"GetDedicatedIpRequest"}, + "output":{"shape":"GetDedicatedIpResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "GetDedicatedIps":{ + "name":"GetDedicatedIps", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ips" + }, + 
"input":{"shape":"GetDedicatedIpsRequest"}, + "output":{"shape":"GetDedicatedIpsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "GetDeliverabilityDashboardOptions":{ + "name":"GetDeliverabilityDashboardOptions", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard" + }, + "input":{"shape":"GetDeliverabilityDashboardOptionsRequest"}, + "output":{"shape":"GetDeliverabilityDashboardOptionsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ] + }, + "GetDeliverabilityTestReport":{ + "name":"GetDeliverabilityTestReport", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/test-reports/{ReportId}" + }, + "input":{"shape":"GetDeliverabilityTestReportRequest"}, + "output":{"shape":"GetDeliverabilityTestReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "GetDomainDeliverabilityCampaign":{ + "name":"GetDomainDeliverabilityCampaign", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/campaigns/{CampaignId}" + }, + "input":{"shape":"GetDomainDeliverabilityCampaignRequest"}, + "output":{"shape":"GetDomainDeliverabilityCampaignResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ] + }, + "GetDomainStatisticsReport":{ + "name":"GetDomainStatisticsReport", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/statistics-report/{Domain}" + }, + "input":{"shape":"GetDomainStatisticsReportRequest"}, + "output":{"shape":"GetDomainStatisticsReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "GetEmailIdentity":{ + "name":"GetEmailIdentity", + "http":{ + "method":"GET", + "requestUri":"/v2/email/identities/{EmailIdentity}" + }, + "input":{"shape":"GetEmailIdentityRequest"}, + "output":{"shape":"GetEmailIdentityResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "ListConfigurationSets":{ + "name":"ListConfigurationSets", + "http":{ + "method":"GET", + "requestUri":"/v2/email/configuration-sets" + }, + "input":{"shape":"ListConfigurationSetsRequest"}, + "output":{"shape":"ListConfigurationSetsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "ListDedicatedIpPools":{ + "name":"ListDedicatedIpPools", + "http":{ + "method":"GET", + "requestUri":"/v2/email/dedicated-ip-pools" + }, + "input":{"shape":"ListDedicatedIpPoolsRequest"}, + "output":{"shape":"ListDedicatedIpPoolsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "ListDeliverabilityTestReports":{ + "name":"ListDeliverabilityTestReports", + "http":{ + "method":"GET", + "requestUri":"/v2/email/deliverability-dashboard/test-reports" + }, + "input":{"shape":"ListDeliverabilityTestReportsRequest"}, + "output":{"shape":"ListDeliverabilityTestReportsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "ListDomainDeliverabilityCampaigns":{ + "name":"ListDomainDeliverabilityCampaigns", + "http":{ + "method":"GET", + 
"requestUri":"/v2/email/deliverability-dashboard/domains/{SubscribedDomain}/campaigns" + }, + "input":{"shape":"ListDomainDeliverabilityCampaignsRequest"}, + "output":{"shape":"ListDomainDeliverabilityCampaignsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"} + ] + }, + "ListEmailIdentities":{ + "name":"ListEmailIdentities", + "http":{ + "method":"GET", + "requestUri":"/v2/email/identities" + }, + "input":{"shape":"ListEmailIdentitiesRequest"}, + "output":{"shape":"ListEmailIdentitiesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "PutAccountDedicatedIpWarmupAttributes":{ + "name":"PutAccountDedicatedIpWarmupAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/account/dedicated-ips/warmup" + }, + "input":{"shape":"PutAccountDedicatedIpWarmupAttributesRequest"}, + "output":{"shape":"PutAccountDedicatedIpWarmupAttributesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutAccountSendingAttributes":{ + "name":"PutAccountSendingAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/account/sending" + }, + "input":{"shape":"PutAccountSendingAttributesRequest"}, + "output":{"shape":"PutAccountSendingAttributesResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutConfigurationSetDeliveryOptions":{ + "name":"PutConfigurationSetDeliveryOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/delivery-options" + }, + "input":{"shape":"PutConfigurationSetDeliveryOptionsRequest"}, + "output":{"shape":"PutConfigurationSetDeliveryOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutConfigurationSetReputationOptions":{ + "name":"PutConfigurationSetReputationOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/reputation-options" + }, + "input":{"shape":"PutConfigurationSetReputationOptionsRequest"}, + "output":{"shape":"PutConfigurationSetReputationOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutConfigurationSetSendingOptions":{ + "name":"PutConfigurationSetSendingOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/sending" + }, + "input":{"shape":"PutConfigurationSetSendingOptionsRequest"}, + "output":{"shape":"PutConfigurationSetSendingOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutConfigurationSetTrackingOptions":{ + "name":"PutConfigurationSetTrackingOptions", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/tracking-options" + }, + "input":{"shape":"PutConfigurationSetTrackingOptionsRequest"}, + 
"output":{"shape":"PutConfigurationSetTrackingOptionsResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutDedicatedIpInPool":{ + "name":"PutDedicatedIpInPool", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/dedicated-ips/{IP}/pool" + }, + "input":{"shape":"PutDedicatedIpInPoolRequest"}, + "output":{"shape":"PutDedicatedIpInPoolResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutDedicatedIpWarmupAttributes":{ + "name":"PutDedicatedIpWarmupAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/dedicated-ips/{IP}/warmup" + }, + "input":{"shape":"PutDedicatedIpWarmupAttributesRequest"}, + "output":{"shape":"PutDedicatedIpWarmupAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutDeliverabilityDashboardOption":{ + "name":"PutDeliverabilityDashboardOption", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/deliverability-dashboard" + }, + "input":{"shape":"PutDeliverabilityDashboardOptionRequest"}, + "output":{"shape":"PutDeliverabilityDashboardOptionResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"} + ] + }, + "PutEmailIdentityDkimAttributes":{ + "name":"PutEmailIdentityDkimAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/dkim" + }, + "input":{"shape":"PutEmailIdentityDkimAttributesRequest"}, + "output":{"shape":"PutEmailIdentityDkimAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutEmailIdentityFeedbackAttributes":{ + "name":"PutEmailIdentityFeedbackAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/feedback" + }, + "input":{"shape":"PutEmailIdentityFeedbackAttributesRequest"}, + "output":{"shape":"PutEmailIdentityFeedbackAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "PutEmailIdentityMailFromAttributes":{ + "name":"PutEmailIdentityMailFromAttributes", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/identities/{EmailIdentity}/mail-from" + }, + "input":{"shape":"PutEmailIdentityMailFromAttributesRequest"}, + "output":{"shape":"PutEmailIdentityMailFromAttributesResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + }, + "SendEmail":{ + "name":"SendEmail", + "http":{ + "method":"POST", + "requestUri":"/v2/email/outbound-emails" + }, + "input":{"shape":"SendEmailRequest"}, + "output":{"shape":"SendEmailResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"LimitExceededException"}, + {"shape":"AccountSuspendedException"}, + {"shape":"SendingPausedException"}, + {"shape":"MessageRejected"}, + {"shape":"MailFromDomainNotVerifiedException"}, + {"shape":"NotFoundException"}, + {"shape":"BadRequestException"} + ] + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + 
{"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/v2/email/tags" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"} + ] + }, + "UpdateConfigurationSetEventDestination":{ + "name":"UpdateConfigurationSetEventDestination", + "http":{ + "method":"PUT", + "requestUri":"/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}" + }, + "input":{"shape":"UpdateConfigurationSetEventDestinationRequest"}, + "output":{"shape":"UpdateConfigurationSetEventDestinationResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ] + } + }, + "shapes":{ + "AccountSuspendedException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "AlreadyExistsException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "AmazonResourceName":{"type":"string"}, + "BadRequestException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "BehaviorOnMxFailure":{ + "type":"string", + "enum":[ + "USE_DEFAULT_VALUE", + "REJECT_MESSAGE" + ] + }, + "BlacklistEntries":{ + "type":"list", + "member":{"shape":"BlacklistEntry"} + }, + "BlacklistEntry":{ + "type":"structure", + "members":{ + "RblName":{"shape":"RblName"}, + "ListingTime":{"shape":"Timestamp"}, + "Description":{"shape":"BlacklistingDescription"} + } + }, + "BlacklistItemName":{"type":"string"}, + "BlacklistItemNames":{ + "type":"list", + "member":{"shape":"BlacklistItemName"} + }, + "BlacklistReport":{ + "type":"map", + "key":{"shape":"BlacklistItemName"}, + "value":{"shape":"BlacklistEntries"} + }, + "BlacklistingDescription":{"type":"string"}, + "Body":{ + "type":"structure", + "members":{ + "Text":{"shape":"Content"}, + "Html":{"shape":"Content"} + } + }, + "CampaignId":{"type":"string"}, + "Charset":{"type":"string"}, + "CloudWatchDestination":{ + "type":"structure", + "required":["DimensionConfigurations"], + "members":{ + "DimensionConfigurations":{"shape":"CloudWatchDimensionConfigurations"} + } + }, + "CloudWatchDimensionConfiguration":{ + "type":"structure", + "required":[ + "DimensionName", + "DimensionValueSource", + "DefaultDimensionValue" + ], + "members":{ + "DimensionName":{"shape":"DimensionName"}, + "DimensionValueSource":{"shape":"DimensionValueSource"}, + "DefaultDimensionValue":{"shape":"DefaultDimensionValue"} + } + }, + "CloudWatchDimensionConfigurations":{ + "type":"list", + "member":{"shape":"CloudWatchDimensionConfiguration"} + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":500}, + "exception":true + }, + "ConfigurationSetName":{"type":"string"}, + "ConfigurationSetNameList":{ + "type":"list", + "member":{"shape":"ConfigurationSetName"} + }, + "Content":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"MessageData"}, + "Charset":{"shape":"Charset"} + } + }, + "CreateConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + 
"EventDestinationName", + "EventDestination" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{"shape":"EventDestinationName"}, + "EventDestination":{"shape":"EventDestinationDefinition"} + } + }, + "CreateConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{"shape":"ConfigurationSetName"}, + "TrackingOptions":{"shape":"TrackingOptions"}, + "DeliveryOptions":{"shape":"DeliveryOptions"}, + "ReputationOptions":{"shape":"ReputationOptions"}, + "SendingOptions":{"shape":"SendingOptions"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateConfigurationSetResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDedicatedIpPoolRequest":{ + "type":"structure", + "required":["PoolName"], + "members":{ + "PoolName":{"shape":"PoolName"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDedicatedIpPoolResponse":{ + "type":"structure", + "members":{ + } + }, + "CreateDeliverabilityTestReportRequest":{ + "type":"structure", + "required":[ + "FromEmailAddress", + "Content" + ], + "members":{ + "ReportName":{"shape":"ReportName"}, + "FromEmailAddress":{"shape":"EmailAddress"}, + "Content":{"shape":"EmailContent"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateDeliverabilityTestReportResponse":{ + "type":"structure", + "required":[ + "ReportId", + "DeliverabilityTestStatus" + ], + "members":{ + "ReportId":{"shape":"ReportId"}, + "DeliverabilityTestStatus":{"shape":"DeliverabilityTestStatus"} + } + }, + "CreateEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{"shape":"Identity"}, + "Tags":{"shape":"TagList"} + } + }, + "CreateEmailIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "VerifiedForSendingStatus":{"shape":"Enabled"}, + "DkimAttributes":{"shape":"DkimAttributes"} + } + }, + "CustomRedirectDomain":{"type":"string"}, + "DailyVolume":{ + "type":"structure", + "members":{ + "StartDate":{"shape":"Timestamp"}, + "VolumeStatistics":{"shape":"VolumeStatistics"}, + "DomainIspPlacements":{"shape":"DomainIspPlacements"} + } + }, + "DailyVolumes":{ + "type":"list", + "member":{"shape":"DailyVolume"} + }, + "DedicatedIp":{ + "type":"structure", + "required":[ + "Ip", + "WarmupStatus", + "WarmupPercentage" + ], + "members":{ + "Ip":{"shape":"Ip"}, + "WarmupStatus":{"shape":"WarmupStatus"}, + "WarmupPercentage":{"shape":"Percentage100Wrapper"}, + "PoolName":{"shape":"PoolName"} + } + }, + "DedicatedIpList":{ + "type":"list", + "member":{"shape":"DedicatedIp"} + }, + "DefaultDimensionValue":{"type":"string"}, + "DeleteConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + "EventDestinationName" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "location":"uri", + "locationName":"EventDestinationName" + } + } + }, + "DeleteConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + 
"locationName":"ConfigurationSetName" + } + } + }, + "DeleteConfigurationSetResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteDedicatedIpPoolRequest":{ + "type":"structure", + "required":["PoolName"], + "members":{ + "PoolName":{ + "shape":"PoolName", + "location":"uri", + "locationName":"PoolName" + } + } + }, + "DeleteDedicatedIpPoolResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "location":"uri", + "locationName":"EmailIdentity" + } + } + }, + "DeleteEmailIdentityResponse":{ + "type":"structure", + "members":{ + } + }, + "DeliverabilityDashboardAccountStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING_EXPIRATION", + "DISABLED" + ] + }, + "DeliverabilityTestReport":{ + "type":"structure", + "members":{ + "ReportId":{"shape":"ReportId"}, + "ReportName":{"shape":"ReportName"}, + "Subject":{"shape":"DeliverabilityTestSubject"}, + "FromEmailAddress":{"shape":"EmailAddress"}, + "CreateDate":{"shape":"Timestamp"}, + "DeliverabilityTestStatus":{"shape":"DeliverabilityTestStatus"} + } + }, + "DeliverabilityTestReports":{ + "type":"list", + "member":{"shape":"DeliverabilityTestReport"} + }, + "DeliverabilityTestStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETED" + ] + }, + "DeliverabilityTestSubject":{"type":"string"}, + "DeliveryOptions":{ + "type":"structure", + "members":{ + "TlsPolicy":{"shape":"TlsPolicy"}, + "SendingPoolName":{"shape":"PoolName"} + } + }, + "Destination":{ + "type":"structure", + "members":{ + "ToAddresses":{"shape":"EmailAddressList"}, + "CcAddresses":{"shape":"EmailAddressList"}, + "BccAddresses":{"shape":"EmailAddressList"} + } + }, + "DimensionName":{"type":"string"}, + "DimensionValueSource":{ + "type":"string", + "enum":[ + "MESSAGE_TAG", + "EMAIL_HEADER", + "LINK_TAG" + ] + }, + "DkimAttributes":{ + "type":"structure", + "members":{ + "SigningEnabled":{"shape":"Enabled"}, + "Status":{"shape":"DkimStatus"}, + "Tokens":{"shape":"DnsTokenList"} + } + }, + "DkimStatus":{ + "type":"string", + "enum":[ + "PENDING", + "SUCCESS", + "FAILED", + "TEMPORARY_FAILURE", + "NOT_STARTED" + ] + }, + "DnsToken":{"type":"string"}, + "DnsTokenList":{ + "type":"list", + "member":{"shape":"DnsToken"} + }, + "Domain":{"type":"string"}, + "DomainDeliverabilityCampaign":{ + "type":"structure", + "members":{ + "CampaignId":{"shape":"CampaignId"}, + "ImageUrl":{"shape":"ImageUrl"}, + "Subject":{"shape":"Subject"}, + "FromAddress":{"shape":"Identity"}, + "SendingIps":{"shape":"IpList"}, + "FirstSeenDateTime":{"shape":"Timestamp"}, + "LastSeenDateTime":{"shape":"Timestamp"}, + "InboxCount":{"shape":"Volume"}, + "SpamCount":{"shape":"Volume"}, + "ReadRate":{"shape":"Percentage"}, + "DeleteRate":{"shape":"Percentage"}, + "ReadDeleteRate":{"shape":"Percentage"}, + "ProjectedVolume":{"shape":"Volume"}, + "Esps":{"shape":"Esps"} + } + }, + "DomainDeliverabilityCampaignList":{ + "type":"list", + "member":{"shape":"DomainDeliverabilityCampaign"} + }, + "DomainDeliverabilityTrackingOption":{ + "type":"structure", + "members":{ + "Domain":{"shape":"Domain"}, + "SubscriptionStartDate":{"shape":"Timestamp"}, + "InboxPlacementTrackingOption":{"shape":"InboxPlacementTrackingOption"} + } + }, + "DomainDeliverabilityTrackingOptions":{ + "type":"list", + "member":{"shape":"DomainDeliverabilityTrackingOption"} + }, + "DomainIspPlacement":{ + "type":"structure", + "members":{ + "IspName":{"shape":"IspName"}, + 
"InboxRawCount":{"shape":"Volume"}, + "SpamRawCount":{"shape":"Volume"}, + "InboxPercentage":{"shape":"Percentage"}, + "SpamPercentage":{"shape":"Percentage"} + } + }, + "DomainIspPlacements":{ + "type":"list", + "member":{"shape":"DomainIspPlacement"} + }, + "EmailAddress":{"type":"string"}, + "EmailAddressList":{ + "type":"list", + "member":{"shape":"EmailAddress"} + }, + "EmailContent":{ + "type":"structure", + "members":{ + "Simple":{"shape":"Message"}, + "Raw":{"shape":"RawMessage"}, + "Template":{"shape":"Template"} + } + }, + "Enabled":{"type":"boolean"}, + "Esp":{"type":"string"}, + "Esps":{ + "type":"list", + "member":{"shape":"Esp"} + }, + "EventDestination":{ + "type":"structure", + "required":[ + "Name", + "MatchingEventTypes" + ], + "members":{ + "Name":{"shape":"EventDestinationName"}, + "Enabled":{"shape":"Enabled"}, + "MatchingEventTypes":{"shape":"EventTypes"}, + "KinesisFirehoseDestination":{"shape":"KinesisFirehoseDestination"}, + "CloudWatchDestination":{"shape":"CloudWatchDestination"}, + "SnsDestination":{"shape":"SnsDestination"}, + "PinpointDestination":{"shape":"PinpointDestination"} + } + }, + "EventDestinationDefinition":{ + "type":"structure", + "members":{ + "Enabled":{"shape":"Enabled"}, + "MatchingEventTypes":{"shape":"EventTypes"}, + "KinesisFirehoseDestination":{"shape":"KinesisFirehoseDestination"}, + "CloudWatchDestination":{"shape":"CloudWatchDestination"}, + "SnsDestination":{"shape":"SnsDestination"}, + "PinpointDestination":{"shape":"PinpointDestination"} + } + }, + "EventDestinationName":{"type":"string"}, + "EventDestinations":{ + "type":"list", + "member":{"shape":"EventDestination"} + }, + "EventType":{ + "type":"string", + "enum":[ + "SEND", + "REJECT", + "BOUNCE", + "COMPLAINT", + "DELIVERY", + "OPEN", + "CLICK", + "RENDERING_FAILURE" + ] + }, + "EventTypes":{ + "type":"list", + "member":{"shape":"EventType"} + }, + "GeneralEnforcementStatus":{"type":"string"}, + "GetAccountRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountResponse":{ + "type":"structure", + "members":{ + "SendQuota":{"shape":"SendQuota"}, + "SendingEnabled":{"shape":"Enabled"}, + "DedicatedIpAutoWarmupEnabled":{"shape":"Enabled"}, + "EnforcementStatus":{"shape":"GeneralEnforcementStatus"}, + "ProductionAccessEnabled":{"shape":"Enabled"} + } + }, + "GetBlacklistReportsRequest":{ + "type":"structure", + "required":["BlacklistItemNames"], + "members":{ + "BlacklistItemNames":{ + "shape":"BlacklistItemNames", + "location":"querystring", + "locationName":"BlacklistItemNames" + } + } + }, + "GetBlacklistReportsResponse":{ + "type":"structure", + "required":["BlacklistReport"], + "members":{ + "BlacklistReport":{"shape":"BlacklistReport"} + } + }, + "GetConfigurationSetEventDestinationsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + } + } + }, + "GetConfigurationSetEventDestinationsResponse":{ + "type":"structure", + "members":{ + "EventDestinations":{"shape":"EventDestinations"} + } + }, + "GetConfigurationSetRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + } + } + }, + "GetConfigurationSetResponse":{ + "type":"structure", + "members":{ + "ConfigurationSetName":{"shape":"ConfigurationSetName"}, + "TrackingOptions":{"shape":"TrackingOptions"}, + 
"DeliveryOptions":{"shape":"DeliveryOptions"}, + "ReputationOptions":{"shape":"ReputationOptions"}, + "SendingOptions":{"shape":"SendingOptions"}, + "Tags":{"shape":"TagList"} + } + }, + "GetDedicatedIpRequest":{ + "type":"structure", + "required":["Ip"], + "members":{ + "Ip":{ + "shape":"Ip", + "location":"uri", + "locationName":"IP" + } + } + }, + "GetDedicatedIpResponse":{ + "type":"structure", + "members":{ + "DedicatedIp":{"shape":"DedicatedIp"} + } + }, + "GetDedicatedIpsRequest":{ + "type":"structure", + "members":{ + "PoolName":{ + "shape":"PoolName", + "location":"querystring", + "locationName":"PoolName" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "GetDedicatedIpsResponse":{ + "type":"structure", + "members":{ + "DedicatedIps":{"shape":"DedicatedIpList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "GetDeliverabilityDashboardOptionsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetDeliverabilityDashboardOptionsResponse":{ + "type":"structure", + "required":["DashboardEnabled"], + "members":{ + "DashboardEnabled":{"shape":"Enabled"}, + "SubscriptionExpiryDate":{"shape":"Timestamp"}, + "AccountStatus":{"shape":"DeliverabilityDashboardAccountStatus"}, + "ActiveSubscribedDomains":{"shape":"DomainDeliverabilityTrackingOptions"}, + "PendingExpirationSubscribedDomains":{"shape":"DomainDeliverabilityTrackingOptions"} + } + }, + "GetDeliverabilityTestReportRequest":{ + "type":"structure", + "required":["ReportId"], + "members":{ + "ReportId":{ + "shape":"ReportId", + "location":"uri", + "locationName":"ReportId" + } + } + }, + "GetDeliverabilityTestReportResponse":{ + "type":"structure", + "required":[ + "DeliverabilityTestReport", + "OverallPlacement", + "IspPlacements" + ], + "members":{ + "DeliverabilityTestReport":{"shape":"DeliverabilityTestReport"}, + "OverallPlacement":{"shape":"PlacementStatistics"}, + "IspPlacements":{"shape":"IspPlacements"}, + "Message":{"shape":"MessageContent"}, + "Tags":{"shape":"TagList"} + } + }, + "GetDomainDeliverabilityCampaignRequest":{ + "type":"structure", + "required":["CampaignId"], + "members":{ + "CampaignId":{ + "shape":"CampaignId", + "location":"uri", + "locationName":"CampaignId" + } + } + }, + "GetDomainDeliverabilityCampaignResponse":{ + "type":"structure", + "required":["DomainDeliverabilityCampaign"], + "members":{ + "DomainDeliverabilityCampaign":{"shape":"DomainDeliverabilityCampaign"} + } + }, + "GetDomainStatisticsReportRequest":{ + "type":"structure", + "required":[ + "Domain", + "StartDate", + "EndDate" + ], + "members":{ + "Domain":{ + "shape":"Identity", + "location":"uri", + "locationName":"Domain" + }, + "StartDate":{ + "shape":"Timestamp", + "location":"querystring", + "locationName":"StartDate" + }, + "EndDate":{ + "shape":"Timestamp", + "location":"querystring", + "locationName":"EndDate" + } + } + }, + "GetDomainStatisticsReportResponse":{ + "type":"structure", + "required":[ + "OverallVolume", + "DailyVolumes" + ], + "members":{ + "OverallVolume":{"shape":"OverallVolume"}, + "DailyVolumes":{"shape":"DailyVolumes"} + } + }, + "GetEmailIdentityRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "location":"uri", + "locationName":"EmailIdentity" + } + } + }, + "GetEmailIdentityResponse":{ + "type":"structure", + "members":{ + "IdentityType":{"shape":"IdentityType"}, + 
"FeedbackForwardingStatus":{"shape":"Enabled"}, + "VerifiedForSendingStatus":{"shape":"Enabled"}, + "DkimAttributes":{"shape":"DkimAttributes"}, + "MailFromAttributes":{"shape":"MailFromAttributes"}, + "Tags":{"shape":"TagList"} + } + }, + "Identity":{"type":"string"}, + "IdentityInfo":{ + "type":"structure", + "members":{ + "IdentityType":{"shape":"IdentityType"}, + "IdentityName":{"shape":"Identity"}, + "SendingEnabled":{"shape":"Enabled"} + } + }, + "IdentityInfoList":{ + "type":"list", + "member":{"shape":"IdentityInfo"} + }, + "IdentityType":{ + "type":"string", + "enum":[ + "EMAIL_ADDRESS", + "DOMAIN", + "MANAGED_DOMAIN" + ] + }, + "ImageUrl":{"type":"string"}, + "InboxPlacementTrackingOption":{ + "type":"structure", + "members":{ + "Global":{"shape":"Enabled"}, + "TrackedIsps":{"shape":"IspNameList"} + } + }, + "Ip":{"type":"string"}, + "IpList":{ + "type":"list", + "member":{"shape":"Ip"} + }, + "IspName":{"type":"string"}, + "IspNameList":{ + "type":"list", + "member":{"shape":"IspName"} + }, + "IspPlacement":{ + "type":"structure", + "members":{ + "IspName":{"shape":"IspName"}, + "PlacementStatistics":{"shape":"PlacementStatistics"} + } + }, + "IspPlacements":{ + "type":"list", + "member":{"shape":"IspPlacement"} + }, + "KinesisFirehoseDestination":{ + "type":"structure", + "required":[ + "IamRoleArn", + "DeliveryStreamArn" + ], + "members":{ + "IamRoleArn":{"shape":"AmazonResourceName"}, + "DeliveryStreamArn":{"shape":"AmazonResourceName"} + } + }, + "LastFreshStart":{"type":"timestamp"}, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "ListConfigurationSetsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "ListConfigurationSetsResponse":{ + "type":"structure", + "members":{ + "ConfigurationSets":{"shape":"ConfigurationSetNameList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDedicatedIpPoolsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "ListDedicatedIpPoolsResponse":{ + "type":"structure", + "members":{ + "DedicatedIpPools":{"shape":"ListOfDedicatedIpPools"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDeliverabilityTestReportsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "ListDeliverabilityTestReportsResponse":{ + "type":"structure", + "required":["DeliverabilityTestReports"], + "members":{ + "DeliverabilityTestReports":{"shape":"DeliverabilityTestReports"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListDomainDeliverabilityCampaignsRequest":{ + "type":"structure", + "required":[ + "StartDate", + "EndDate", + "SubscribedDomain" + ], + "members":{ + "StartDate":{ + "shape":"Timestamp", + "location":"querystring", + "locationName":"StartDate" + }, + "EndDate":{ + "shape":"Timestamp", + "location":"querystring", + "locationName":"EndDate" + }, + "SubscribedDomain":{ + "shape":"Domain", + "location":"uri", + "locationName":"SubscribedDomain" + }, + "NextToken":{ + 
"shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "ListDomainDeliverabilityCampaignsResponse":{ + "type":"structure", + "required":["DomainDeliverabilityCampaigns"], + "members":{ + "DomainDeliverabilityCampaigns":{"shape":"DomainDeliverabilityCampaignList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListEmailIdentitiesRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"NextToken" + }, + "PageSize":{ + "shape":"MaxItems", + "location":"querystring", + "locationName":"PageSize" + } + } + }, + "ListEmailIdentitiesResponse":{ + "type":"structure", + "members":{ + "EmailIdentities":{"shape":"IdentityInfoList"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListOfDedicatedIpPools":{ + "type":"list", + "member":{"shape":"PoolName"} + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "location":"querystring", + "locationName":"ResourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "required":["Tags"], + "members":{ + "Tags":{"shape":"TagList"} + } + }, + "MailFromAttributes":{ + "type":"structure", + "required":[ + "MailFromDomain", + "MailFromDomainStatus", + "BehaviorOnMxFailure" + ], + "members":{ + "MailFromDomain":{"shape":"MailFromDomainName"}, + "MailFromDomainStatus":{"shape":"MailFromDomainStatus"}, + "BehaviorOnMxFailure":{"shape":"BehaviorOnMxFailure"} + } + }, + "MailFromDomainName":{"type":"string"}, + "MailFromDomainNotVerifiedException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "MailFromDomainStatus":{ + "type":"string", + "enum":[ + "PENDING", + "SUCCESS", + "FAILED", + "TEMPORARY_FAILURE" + ] + }, + "Max24HourSend":{"type":"double"}, + "MaxItems":{"type":"integer"}, + "MaxSendRate":{"type":"double"}, + "Message":{ + "type":"structure", + "required":[ + "Subject", + "Body" + ], + "members":{ + "Subject":{"shape":"Content"}, + "Body":{"shape":"Body"} + } + }, + "MessageContent":{"type":"string"}, + "MessageData":{"type":"string"}, + "MessageRejected":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "MessageTag":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{"shape":"MessageTagName"}, + "Value":{"shape":"MessageTagValue"} + } + }, + "MessageTagList":{ + "type":"list", + "member":{"shape":"MessageTag"} + }, + "MessageTagName":{"type":"string"}, + "MessageTagValue":{"type":"string"}, + "NextToken":{"type":"string"}, + "NotFoundException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":404}, + "exception":true + }, + "OutboundMessageId":{"type":"string"}, + "OverallVolume":{ + "type":"structure", + "members":{ + "VolumeStatistics":{"shape":"VolumeStatistics"}, + "ReadRatePercent":{"shape":"Percentage"}, + "DomainIspPlacements":{"shape":"DomainIspPlacements"} + } + }, + "Percentage":{"type":"double"}, + "Percentage100Wrapper":{"type":"integer"}, + "PinpointDestination":{ + "type":"structure", + "members":{ + "ApplicationArn":{"shape":"AmazonResourceName"} + } + }, + "PlacementStatistics":{ + "type":"structure", + "members":{ + "InboxPercentage":{"shape":"Percentage"}, + "SpamPercentage":{"shape":"Percentage"}, + "MissingPercentage":{"shape":"Percentage"}, 
+ "SpfPercentage":{"shape":"Percentage"}, + "DkimPercentage":{"shape":"Percentage"} + } + }, + "PoolName":{"type":"string"}, + "PutAccountDedicatedIpWarmupAttributesRequest":{ + "type":"structure", + "members":{ + "AutoWarmupEnabled":{"shape":"Enabled"} + } + }, + "PutAccountDedicatedIpWarmupAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "PutAccountSendingAttributesRequest":{ + "type":"structure", + "members":{ + "SendingEnabled":{"shape":"Enabled"} + } + }, + "PutAccountSendingAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "PutConfigurationSetDeliveryOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "TlsPolicy":{"shape":"TlsPolicy"}, + "SendingPoolName":{"shape":"SendingPoolName"} + } + }, + "PutConfigurationSetDeliveryOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "PutConfigurationSetReputationOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "ReputationMetricsEnabled":{"shape":"Enabled"} + } + }, + "PutConfigurationSetReputationOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "PutConfigurationSetSendingOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "SendingEnabled":{"shape":"Enabled"} + } + }, + "PutConfigurationSetSendingOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "PutConfigurationSetTrackingOptionsRequest":{ + "type":"structure", + "required":["ConfigurationSetName"], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "CustomRedirectDomain":{"shape":"CustomRedirectDomain"} + } + }, + "PutConfigurationSetTrackingOptionsResponse":{ + "type":"structure", + "members":{ + } + }, + "PutDedicatedIpInPoolRequest":{ + "type":"structure", + "required":[ + "Ip", + "DestinationPoolName" + ], + "members":{ + "Ip":{ + "shape":"Ip", + "location":"uri", + "locationName":"IP" + }, + "DestinationPoolName":{"shape":"PoolName"} + } + }, + "PutDedicatedIpInPoolResponse":{ + "type":"structure", + "members":{ + } + }, + "PutDedicatedIpWarmupAttributesRequest":{ + "type":"structure", + "required":[ + "Ip", + "WarmupPercentage" + ], + "members":{ + "Ip":{ + "shape":"Ip", + "location":"uri", + "locationName":"IP" + }, + "WarmupPercentage":{"shape":"Percentage100Wrapper"} + } + }, + "PutDedicatedIpWarmupAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "PutDeliverabilityDashboardOptionRequest":{ + "type":"structure", + "required":["DashboardEnabled"], + "members":{ + "DashboardEnabled":{"shape":"Enabled"}, + "SubscribedDomains":{"shape":"DomainDeliverabilityTrackingOptions"} + } + }, + "PutDeliverabilityDashboardOptionResponse":{ + "type":"structure", + "members":{ + } + }, + "PutEmailIdentityDkimAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "location":"uri", + "locationName":"EmailIdentity" + }, + "SigningEnabled":{"shape":"Enabled"} + } + }, + "PutEmailIdentityDkimAttributesResponse":{ + "type":"structure", + 
"members":{ + } + }, + "PutEmailIdentityFeedbackAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "location":"uri", + "locationName":"EmailIdentity" + }, + "EmailForwardingEnabled":{"shape":"Enabled"} + } + }, + "PutEmailIdentityFeedbackAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "PutEmailIdentityMailFromAttributesRequest":{ + "type":"structure", + "required":["EmailIdentity"], + "members":{ + "EmailIdentity":{ + "shape":"Identity", + "location":"uri", + "locationName":"EmailIdentity" + }, + "MailFromDomain":{"shape":"MailFromDomainName"}, + "BehaviorOnMxFailure":{"shape":"BehaviorOnMxFailure"} + } + }, + "PutEmailIdentityMailFromAttributesResponse":{ + "type":"structure", + "members":{ + } + }, + "RawMessage":{ + "type":"structure", + "required":["Data"], + "members":{ + "Data":{"shape":"RawMessageData"} + } + }, + "RawMessageData":{"type":"blob"}, + "RblName":{"type":"string"}, + "ReportId":{"type":"string"}, + "ReportName":{"type":"string"}, + "ReputationOptions":{ + "type":"structure", + "members":{ + "ReputationMetricsEnabled":{"shape":"Enabled"}, + "LastFreshStart":{"shape":"LastFreshStart"} + } + }, + "SendEmailRequest":{ + "type":"structure", + "required":[ + "Destination", + "Content" + ], + "members":{ + "FromEmailAddress":{"shape":"EmailAddress"}, + "Destination":{"shape":"Destination"}, + "ReplyToAddresses":{"shape":"EmailAddressList"}, + "FeedbackForwardingEmailAddress":{"shape":"EmailAddress"}, + "Content":{"shape":"EmailContent"}, + "EmailTags":{"shape":"MessageTagList"}, + "ConfigurationSetName":{"shape":"ConfigurationSetName"} + } + }, + "SendEmailResponse":{ + "type":"structure", + "members":{ + "MessageId":{"shape":"OutboundMessageId"} + } + }, + "SendQuota":{ + "type":"structure", + "members":{ + "Max24HourSend":{"shape":"Max24HourSend"}, + "MaxSendRate":{"shape":"MaxSendRate"}, + "SentLast24Hours":{"shape":"SentLast24Hours"} + } + }, + "SendingOptions":{ + "type":"structure", + "members":{ + "SendingEnabled":{"shape":"Enabled"} + } + }, + "SendingPausedException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":400}, + "exception":true + }, + "SendingPoolName":{"type":"string"}, + "SentLast24Hours":{"type":"double"}, + "SnsDestination":{ + "type":"structure", + "required":["TopicArn"], + "members":{ + "TopicArn":{"shape":"AmazonResourceName"} + } + }, + "Subject":{"type":"string"}, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{"type":"string"}, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{"shape":"AmazonResourceName"}, + "Tags":{"shape":"TagList"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{"type":"string"}, + "Template":{ + "type":"structure", + "members":{ + "TemplateArn":{"shape":"TemplateArn"}, + "TemplateData":{"shape":"TemplateData"} + } + }, + "TemplateArn":{"type":"string"}, + "TemplateData":{ + "type":"string", + "max":262144 + }, + "Timestamp":{"type":"timestamp"}, + "TlsPolicy":{ + "type":"string", + "enum":[ + "REQUIRE", + "OPTIONAL" + ] + }, + "TooManyRequestsException":{ + "type":"structure", + "members":{ + }, + "error":{"httpStatusCode":429}, + 
"exception":true + }, + "TrackingOptions":{ + "type":"structure", + "required":["CustomRedirectDomain"], + "members":{ + "CustomRedirectDomain":{"shape":"CustomRedirectDomain"} + } + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "location":"querystring", + "locationName":"ResourceArn" + }, + "TagKeys":{ + "shape":"TagKeyList", + "location":"querystring", + "locationName":"TagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateConfigurationSetEventDestinationRequest":{ + "type":"structure", + "required":[ + "ConfigurationSetName", + "EventDestinationName", + "EventDestination" + ], + "members":{ + "ConfigurationSetName":{ + "shape":"ConfigurationSetName", + "location":"uri", + "locationName":"ConfigurationSetName" + }, + "EventDestinationName":{ + "shape":"EventDestinationName", + "location":"uri", + "locationName":"EventDestinationName" + }, + "EventDestination":{"shape":"EventDestinationDefinition"} + } + }, + "UpdateConfigurationSetEventDestinationResponse":{ + "type":"structure", + "members":{ + } + }, + "Volume":{"type":"long"}, + "VolumeStatistics":{ + "type":"structure", + "members":{ + "InboxRawCount":{"shape":"Volume"}, + "SpamRawCount":{"shape":"Volume"}, + "ProjectedInbox":{"shape":"Volume"}, + "ProjectedSpam":{"shape":"Volume"} + } + }, + "WarmupStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "DONE" + ] + } + } +} diff --git a/models/apis/sesv2/2019-09-27/docs-2.json b/models/apis/sesv2/2019-09-27/docs-2.json new file mode 100644 index 00000000000..d955d412b54 --- /dev/null +++ b/models/apis/sesv2/2019-09-27/docs-2.json @@ -0,0 +1,1386 @@ +{ + "version": "2.0", + "service": "Amazon SES API v2

    Welcome to the Amazon SES API v2 Reference. This guide provides information about the Amazon SES API v2, including supported operations, data types, parameters, and schemas.

    Amazon SES is an AWS service that you can use to send email messages to your customers.

    If you're new to Amazon SES API v2, you might find it helpful to also review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically.

    The Amazon SES API v2 is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference.

    In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure.

    ", + "operations": { + "CreateConfigurationSet": "

    Create a configuration set. Configuration sets are groups of rules that you can apply to the emails that you send. You apply a configuration set to an email by specifying the name of the configuration set when you call the Amazon SES API v2. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    ", + "CreateConfigurationSetEventDestination": "

    Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    A single configuration set can include more than one event destination.
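
As a concrete illustration of the two configuration-set operations above, here is a minimal Go sketch using the generated sesv2 package that this release adds. It assumes the preview SDK's `<Operation>Request(...).Send(ctx)` calling convention, the `sesv2.Client` type returned by `sesv2.New`, and the `aws.String`/`aws.Bool` pointer helpers; member names come from the shapes later in this model, but the exact Go types (enum constants, slice element types) are assumptions about the code generator's output, not something this diff guarantees.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := sesv2.New(cfg)

	// Create a configuration set that later SendEmail calls can reference by name.
	_, err = client.CreateConfigurationSetRequest(&sesv2.CreateConfigurationSetInput{
		ConfigurationSetName: aws.String("my-config-set"),
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Attach an SNS event destination so bounce and complaint events are published to a topic.
	_, err = client.CreateConfigurationSetEventDestinationRequest(&sesv2.CreateConfigurationSetEventDestinationInput{
		ConfigurationSetName: aws.String("my-config-set"),
		EventDestinationName: aws.String("bounce-and-complaint-topic"),
		EventDestination: &sesv2.EventDestinationDefinition{
			Enabled:            aws.Bool(true),
			MatchingEventTypes: []sesv2.EventType{sesv2.EventTypeBounce, sesv2.EventTypeComplaint},
			SnsDestination: &sesv2.SnsDestination{
				TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:ses-events"),
			},
		},
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
}
```

The SNS topic ARN is a placeholder; any of the other destination types defined in EventDestinationDefinition (Kinesis Data Firehose, CloudWatch, Pinpoint) could be configured the same way.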

    ", + "CreateDedicatedIpPool": "

    Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your AWS account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, the message is sent from one of the addresses in the associated pool.

    ", + "CreateDeliverabilityTestReport": "

    Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon SES API v2 then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    ", + "CreateEmailIdentity": "

    Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

    When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

    When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. For some DNS providers, it can take 72 hours or more to complete the domain verification process.
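
The sketch below, under the same preview-SDK assumptions as the configuration-set example above (and assuming the DnsTokenList maps to `[]string`), starts domain verification and prints the CNAME records implied by the returned DKIM tokens.

```go
package sesv2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// verifyDomainIdentity starts verification for a domain identity and returns the
// DKIM tokens that need to be published as CNAME records in the domain's DNS.
func verifyDomainIdentity(ctx context.Context, client *sesv2.Client, domain string) ([]string, error) {
	resp, err := client.CreateEmailIdentityRequest(&sesv2.CreateEmailIdentityInput{
		EmailIdentity: aws.String(domain),
	}).Send(ctx)
	if err != nil {
		return nil, err
	}
	// Email-address identities are verified by a confirmation link instead of DKIM tokens.
	if resp.DkimAttributes == nil {
		return nil, nil
	}
	// Each token becomes a CNAME record of the form
	// <token>._domainkey.<domain> -> <token>.dkim.amazonses.com.
	for _, token := range resp.DkimAttributes.Tokens {
		fmt.Printf("%s._domainkey.%s CNAME %s.dkim.amazonses.com\n", token, domain, token)
	}
	return resp.DkimAttributes.Tokens, nil
}
```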

    ", + "DeleteConfigurationSet": "

    Delete an existing configuration set.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    ", + "DeleteConfigurationSetEventDestination": "

    Delete an event destination.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    ", + "DeleteDedicatedIpPool": "

    Delete a dedicated IP pool.

    ", + "DeleteEmailIdentity": "

    Deletes an email identity. An identity can be either an email address or a domain name.

    ", + "GetAccount": "

    Obtain information about the email-sending status and capabilities of your Amazon SES account in the current AWS Region.

    ", + "GetBlacklistReports": "

    Retrieve a list of the blacklists that your dedicated IP addresses appear on.

    ", + "GetConfigurationSet": "

    Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    ", + "GetConfigurationSetEventDestinations": "

    Retrieve a list of event destinations that are associated with a configuration set.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    ", + "GetDedicatedIp": "

    Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well as information about the automatic warm-up process for the address.

    ", + "GetDedicatedIps": "

    List the dedicated IP addresses that are associated with your AWS account.
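
A hedged sketch of paging through the dedicated IP list with the NextToken/PageSize members defined in GetDedicatedIpsRequest. The pointer dereferences rely on Ip and WarmupStatus being required members of DedicatedIp in this model, and the slice and enum Go types are assumed from the preview code generator.

```go
package sesv2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// listDedicatedIPs pages through the dedicated IP addresses in one pool and
// returns how many were found, printing each address and its warm-up status.
func listDedicatedIPs(ctx context.Context, client *sesv2.Client, poolName string) (int, error) {
	total := 0
	var next *string
	for {
		resp, err := client.GetDedicatedIpsRequest(&sesv2.GetDedicatedIpsInput{
			PoolName:  aws.String(poolName),
			NextToken: next,
			PageSize:  aws.Int64(20),
		}).Send(ctx)
		if err != nil {
			return 0, err
		}
		for _, ip := range resp.DedicatedIps {
			// Ip and WarmupStatus are required members of DedicatedIp in this model.
			fmt.Printf("dedicated IP %s, warm-up status %s\n", *ip.Ip, ip.WarmupStatus)
		}
		total += len(resp.DedicatedIps)
		if resp.NextToken == nil {
			return total, nil
		}
		next = resp.NextToken
	}
}
```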

    ", + "GetDeliverabilityDashboardOptions": "

    Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
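
Because most of the deliverability-dashboard operations only succeed while the dashboard is enabled, a small guard like the following can be useful (same calling-convention assumptions as the earlier sketches).

```go
package sesv2examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// dashboardEnabled reports whether the Deliverability dashboard is currently
// enabled for the account in this Region.
func dashboardEnabled(ctx context.Context, client *sesv2.Client) (bool, error) {
	resp, err := client.GetDeliverabilityDashboardOptionsRequest(&sesv2.GetDeliverabilityDashboardOptionsInput{}).Send(ctx)
	if err != nil {
		return false, err
	}
	// DashboardEnabled is a required member of the response shape.
	return resp.DashboardEnabled != nil && *resp.DashboardEnabled, nil
}
```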

    ", + "GetDeliverabilityTestReport": "

    Retrieve the results of a predictive inbox placement test.

    ", + "GetDomainDeliverabilityCampaign": "

    Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.

    ", + "GetDomainStatisticsReport": "

    Retrieve inbox placement and engagement rates for the domains that you use to send email.

    ", + "GetEmailIdentity": "

    Provides information about a specific identity, including the identity's verification status, its DKIM authentication status, and its custom Mail-From settings.

    ", + "ListConfigurationSets": "

    List all of the configuration sets associated with your account in the current region.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    ", + "ListDedicatedIpPools": "

    List all of the dedicated IP pools that exist in your AWS account in the current Region.

    ", + "ListDeliverabilityTestReports": "

    Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the GetDeliverabilityTestReport operation to view the results.

    ", + "ListDomainDeliverabilityCampaigns": "

    Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.

    ", + "ListEmailIdentities": "

    Returns a list of all of the email identities that are associated with your AWS account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't. This operation returns identities that are associated with Amazon SES and Amazon Pinpoint.
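
The listing operations all share the same NextToken/PageSize pagination style, so one example suffices; this hypothetical helper walks every page of ListEmailIdentities under the same preview-SDK assumptions as above.

```go
package sesv2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// printEmailIdentities prints every email identity in the account, one page at
// a time, along with its identity type (EMAIL_ADDRESS, DOMAIN, or MANAGED_DOMAIN).
func printEmailIdentities(ctx context.Context, client *sesv2.Client) error {
	var next *string
	for {
		resp, err := client.ListEmailIdentitiesRequest(&sesv2.ListEmailIdentitiesInput{
			NextToken: next,
			PageSize:  aws.Int64(50),
		}).Send(ctx)
		if err != nil {
			return err
		}
		for _, id := range resp.EmailIdentities {
			name := ""
			if id.IdentityName != nil {
				name = *id.IdentityName
			}
			fmt.Printf("%s (type %s)\n", name, id.IdentityType)
		}
		if resp.NextToken == nil {
			return nil
		}
		next = resp.NextToken
	}
}
```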

    ", + "ListTagsForResource": "

    Retrieve a list of the tags (keys and values) that are associated with a specified resource. A tag is a label that you optionally define and associate with a resource. Each tag consists of a required tag key and an optional associated tag value. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.

    ", + "PutAccountDedicatedIpWarmupAttributes": "

    Enable or disable the automatic warm-up feature for dedicated IP addresses.

    ", + "PutAccountSendingAttributes": "

    Enable or disable the ability of your account to send email.

    ", + "PutConfigurationSetDeliveryOptions": "

    Associate a configuration set with a dedicated IP pool. You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.

    ", + "PutConfigurationSetReputationOptions": "

    Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.

    ", + "PutConfigurationSetSendingOptions": "

    Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.

    ", + "PutConfigurationSetTrackingOptions": "

    Specify a custom domain to use for open and click tracking elements in email that you send.
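
The PutConfigurationSet* operations are simple toggles on an existing configuration set. Below is a sketch that pauses sending for one set and turns on its reputation metrics, under the same preview-SDK assumptions as the earlier examples.

```go
package sesv2examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// pauseConfigurationSetSending disables email sending for a single configuration
// set and enables reputation metrics for it, leaving account-level sending untouched.
func pauseConfigurationSetSending(ctx context.Context, client *sesv2.Client, name string) error {
	if _, err := client.PutConfigurationSetSendingOptionsRequest(&sesv2.PutConfigurationSetSendingOptionsInput{
		ConfigurationSetName: aws.String(name),
		SendingEnabled:       aws.Bool(false),
	}).Send(ctx); err != nil {
		return err
	}
	_, err := client.PutConfigurationSetReputationOptionsRequest(&sesv2.PutConfigurationSetReputationOptionsInput{
		ConfigurationSetName:     aws.String(name),
		ReputationMetricsEnabled: aws.Bool(true),
	}).Send(ctx)
	return err
}
```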

    ", + "PutDedicatedIpInPool": "

    Move a dedicated IP address to an existing dedicated IP pool.

    The dedicated IP address that you specify must already exist, and must be associated with your AWS account.

    The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool operation.
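
A minimal sketch of the move itself, assuming the pool was created earlier with CreateDedicatedIpPool and the same preview-SDK conventions as above.

```go
package sesv2examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// moveDedicatedIPToPool moves an existing dedicated IP address into an existing
// dedicated IP pool; both must already belong to the account.
func moveDedicatedIPToPool(ctx context.Context, client *sesv2.Client, ip, pool string) error {
	_, err := client.PutDedicatedIpInPoolRequest(&sesv2.PutDedicatedIpInPoolInput{
		Ip:                  aws.String(ip),
		DestinationPoolName: aws.String(pool),
	}).Send(ctx)
	return err
}
```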

    ", + "PutDedicatedIpWarmupAttributes": "

    ", + "PutDeliverabilityDashboardOption": "

    Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

    ", + "PutEmailIdentityDkimAttributes": "

    Used to enable or disable DKIM authentication for an email identity.

    ", + "PutEmailIdentityFeedbackAttributes": "

    Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    ", + "PutEmailIdentityMailFromAttributes": "

    Used to enable or disable the custom Mail-From domain configuration for an email identity.
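
For example, the following hypothetical helper sets a custom MAIL FROM domain and opts into the amazonses.com fallback on MX lookup failure; `BehaviorOnMxFailureUseDefaultValue` is the constant name the code generator is assumed to emit for the USE_DEFAULT_VALUE enum value.

```go
package sesv2examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// configureMailFromDomain sets a custom MAIL FROM domain for a verified identity
// and tells SES to fall back to amazonses.com if the required MX record cannot
// be found, rather than rejecting the message.
func configureMailFromDomain(ctx context.Context, client *sesv2.Client, identity, mailFromDomain string) error {
	_, err := client.PutEmailIdentityMailFromAttributesRequest(&sesv2.PutEmailIdentityMailFromAttributesInput{
		EmailIdentity:       aws.String(identity),
		MailFromDomain:      aws.String(mailFromDomain),
		BehaviorOnMxFailure: sesv2.BehaviorOnMxFailureUseDefaultValue,
	}).Send(ctx)
	return err
}
```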

    ", + "SendEmail": "

    Sends an email message. You can use the Amazon SES API v2 to send two types of messages:

    • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and the Amazon SES API v2 assembles the message for you.

    • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.
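
A sketch of the Simple path, with the same caveats as the earlier examples about the preview SDK's generated types (in particular, EmailAddressList is assumed to map to `[]string`). The optional configuration set name ties the send to the event destinations configured earlier.

```go
package sesv2examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// sendSimpleEmail sends a "Simple" message: SES assembles the MIME message from
// the subject and body, so no raw headers are needed.
func sendSimpleEmail(ctx context.Context, client *sesv2.Client, from, to, configSet string) (string, error) {
	resp, err := client.SendEmailRequest(&sesv2.SendEmailInput{
		FromEmailAddress:     aws.String(from),
		ConfigurationSetName: aws.String(configSet),
		Destination: &sesv2.Destination{
			ToAddresses: []string{to},
		},
		Content: &sesv2.EmailContent{
			Simple: &sesv2.Message{
				Subject: &sesv2.Content{Data: aws.String("Hello from the SES API v2")},
				Body: &sesv2.Body{
					Text: &sesv2.Content{Data: aws.String("This message was sent with the sesv2 client.")},
				},
			},
		},
	}).Send(ctx)
	if err != nil {
		return "", err
	}
	id := ""
	if resp.MessageId != nil {
		id = *resp.MessageId
	}
	fmt.Println("accepted, message ID:", id)
	return id, nil
}
```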

    ", + "TagResource": "

    Add one or more tags (keys and values) to a specified resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

    Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.

    ", + "UntagResource": "

    Remove one or more tags (keys and values) from a specified resource.
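
Both tagging operations in one hypothetical round trip; the resource ARN is whatever configuration set, dedicated IP pool, deliverability test report, or identity you want to label, and the `[]sesv2.Tag` / `[]string` element types are assumptions about the generated shapes.

```go
package sesv2examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

// tagThenUntag adds an "environment" tag to a resource identified by its ARN
// and then removes it again.
func tagThenUntag(ctx context.Context, client *sesv2.Client, resourceArn string) error {
	if _, err := client.TagResourceRequest(&sesv2.TagResourceInput{
		ResourceArn: aws.String(resourceArn),
		Tags: []sesv2.Tag{
			{Key: aws.String("environment"), Value: aws.String("staging")},
		},
	}).Send(ctx); err != nil {
		return err
	}
	_, err := client.UntagResourceRequest(&sesv2.UntagResourceInput{
		ResourceArn: aws.String(resourceArn),
		TagKeys:     []string{"environment"},
	}).Send(ctx)
	return err
}
```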

    ", + "UpdateConfigurationSetEventDestination": "

    Update the configuration of an event destination for a configuration set.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    " + }, + "shapes": { + "AccountSuspendedException": { + "base": "

    The message can't be sent because the account's ability to send email has been permanently restricted.

    ", + "refs": { + } + }, + "AlreadyExistsException": { + "base": "

    The resource specified in your request already exists.

    ", + "refs": { + } + }, + "AmazonResourceName": { + "base": null, + "refs": { + "KinesisFirehoseDestination$IamRoleArn": "

    The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 uses to send email events to the Amazon Kinesis Data Firehose stream.

    ", + "KinesisFirehoseDestination$DeliveryStreamArn": "

    The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that the Amazon SES API v2 sends email events to.

    ", + "ListTagsForResourceRequest$ResourceArn": "

    The Amazon Resource Name (ARN) of the resource that you want to retrieve tag information for.

    ", + "PinpointDestination$ApplicationArn": "

    The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.

    ", + "SnsDestination$TopicArn": "

    The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

    ", + "TagResourceRequest$ResourceArn": "

    The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.

    ", + "UntagResourceRequest$ResourceArn": "

    The Amazon Resource Name (ARN) of the resource that you want to remove one or more tags from.

    " + } + }, + "BadRequestException": { + "base": "

    The input you provided is invalid.

    ", + "refs": { + } + }, + "BehaviorOnMxFailure": { + "base": "

    The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

    ", + "refs": { + "MailFromAttributes$BehaviorOnMxFailure": "

    The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

    ", + "PutEmailIdentityMailFromAttributesRequest$BehaviorOnMxFailure": "

    The action that you want to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

    These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.
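A sketch of setting a custom MAIL FROM domain with the fallback behavior described above; field names follow the PutEmailIdentityMailFromAttributesRequest shape, the domain names are placeholders, and the BehaviorOnMxFailure constant name is an assumption:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sesv2.New(cfg)

	req := svc.PutEmailIdentityMailFromAttributesRequest(&sesv2.PutEmailIdentityMailFromAttributesInput{
		EmailIdentity:  aws.String("example.com"),
		MailFromDomain: aws.String("mail.example.com"), // has to be a subdomain of the identity
		// Fall back to amazonses.com if the MX record can't be found (constant name assumed).
		BehaviorOnMxFailure: sesv2.BehaviorOnMxFailureUseDefaultValue,
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```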

    " + } + }, + "BlacklistEntries": { + "base": null, + "refs": { + "BlacklistReport$value": null + } + }, + "BlacklistEntry": { + "base": "

    An object that contains information about a blacklisting event that impacts one of the dedicated IP addresses that is associated with your account.

    ", + "refs": { + "BlacklistEntries$member": null + } + }, + "BlacklistItemName": { + "base": "

    An IP address that you want to obtain blacklist information for.

    ", + "refs": { + "BlacklistItemNames$member": null, + "BlacklistReport$key": null + } + }, + "BlacklistItemNames": { + "base": null, + "refs": { + "GetBlacklistReportsRequest$BlacklistItemNames": "

    A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon SES or Amazon Pinpoint.

    " + } + }, + "BlacklistReport": { + "base": null, + "refs": { + "GetBlacklistReportsResponse$BlacklistReport": "

    An object that contains information about a blacklist that one of your dedicated IP addresses appears on.

    " + } + }, + "BlacklistingDescription": { + "base": "

    A description of the blacklisting event.

    ", + "refs": { + "BlacklistEntry$Description": "

    Additional information about the blacklisting event, as provided by the blacklist maintainer.

    " + } + }, + "Body": { + "base": "

    Represents the body of the email message.

    ", + "refs": { + "Message$Body": "

    The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.

    " + } + }, + "CampaignId": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$CampaignId": "

    The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.

    ", + "GetDomainDeliverabilityCampaignRequest$CampaignId": "

    The unique identifier for the campaign. The Deliverability dashboard automatically generates and assigns this identifier to a campaign.

    " + } + }, + "Charset": { + "base": null, + "refs": { + "Content$Charset": "

    The character set for the content. Because of the constraints of the SMTP protocol, the Amazon SES API v2 uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify UTF-8, ISO-8859-1, or Shift_JIS.

    " + } + }, + "CloudWatchDestination": { + "base": "

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    ", + "refs": { + "EventDestination$CloudWatchDestination": "

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    ", + "EventDestinationDefinition$CloudWatchDestination": "

    An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.

    " + } + }, + "CloudWatchDimensionConfiguration": { + "base": "

    An object that defines the dimension configuration to use when you send email events to Amazon CloudWatch.

    ", + "refs": { + "CloudWatchDimensionConfigurations$member": null + } + }, + "CloudWatchDimensionConfigurations": { + "base": null, + "refs": { + "CloudWatchDestination$DimensionConfigurations": "

    An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.

    " + } + }, + "ConcurrentModificationException": { + "base": "

    The resource is being modified by another operation or thread.

    ", + "refs": { + } + }, + "ConfigurationSetName": { + "base": "

    The name of a configuration set.

    Configuration sets are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.

    ", + "refs": { + "ConfigurationSetNameList$member": null, + "CreateConfigurationSetEventDestinationRequest$ConfigurationSetName": "

    The name of the configuration set that you want to add an event destination to.

    ", + "CreateConfigurationSetRequest$ConfigurationSetName": "

    The name of the configuration set.

    ", + "DeleteConfigurationSetEventDestinationRequest$ConfigurationSetName": "

    The name of the configuration set that contains the event destination that you want to delete.

    ", + "DeleteConfigurationSetRequest$ConfigurationSetName": "

    The name of the configuration set that you want to delete.

    ", + "GetConfigurationSetEventDestinationsRequest$ConfigurationSetName": "

    The name of the configuration set that contains the event destination.

    ", + "GetConfigurationSetRequest$ConfigurationSetName": "

    The name of the configuration set that you want to obtain more information about.

    ", + "GetConfigurationSetResponse$ConfigurationSetName": "

    The name of the configuration set.

    ", + "PutConfigurationSetDeliveryOptionsRequest$ConfigurationSetName": "

    The name of the configuration set that you want to associate with a dedicated IP pool.

    ", + "PutConfigurationSetReputationOptionsRequest$ConfigurationSetName": "

    The name of the configuration set that you want to enable or disable reputation metric tracking for.

    ", + "PutConfigurationSetSendingOptionsRequest$ConfigurationSetName": "

    The name of the configuration set that you want to enable or disable email sending for.

    ", + "PutConfigurationSetTrackingOptionsRequest$ConfigurationSetName": "

    The name of the configuration set that you want to add a custom tracking domain to.

    ", + "SendEmailRequest$ConfigurationSetName": "

    The name of the configuration set that you want to use when sending the email.

    ", + "UpdateConfigurationSetEventDestinationRequest$ConfigurationSetName": "

    The name of the configuration set that contains the event destination that you want to modify.

    " + } + }, + "ConfigurationSetNameList": { + "base": null, + "refs": { + "ListConfigurationSetsResponse$ConfigurationSets": "

    An array that contains all of the configuration sets in your Amazon SES account in the current AWS Region.

    " + } + }, + "Content": { + "base": "

    An object that represents the content of the email, and optionally a character set specification.

    ", + "refs": { + "Body$Text": "

    An object that represents the version of the message that is displayed in email clients that don't support HTML, or clients where the recipient has disabled HTML rendering.

    ", + "Body$Html": "

    An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.

    ", + "Message$Subject": "

    The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in RFC 2047.

    " + } + }, + "CreateConfigurationSetEventDestinationRequest": { + "base": "

    A request to add an event destination to a configuration set.

    ", + "refs": { + } + }, + "CreateConfigurationSetEventDestinationResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "CreateConfigurationSetRequest": { + "base": "

    A request to create a configuration set.

    ", + "refs": { + } + }, + "CreateConfigurationSetResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "CreateDedicatedIpPoolRequest": { + "base": "

    A request to create a new dedicated IP pool.

    ", + "refs": { + } + }, + "CreateDedicatedIpPoolResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "CreateDeliverabilityTestReportRequest": { + "base": "

    A request to perform a predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. We send that message to special email addresses spread across several major email providers around the world. The test takes about 24 hours to complete. When the test is complete, you can use the GetDeliverabilityTestReport operation to view the results of the test.
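A sketch of starting a test and reading back its initial status; field names follow the CreateDeliverabilityTestReportRequest and CreateDeliverabilityTestReportResponse shapes documented below, and the sample content is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sesv2.New(cfg)

	req := svc.CreateDeliverabilityTestReportRequest(&sesv2.CreateDeliverabilityTestReportInput{
		FromEmailAddress: aws.String("sender@example.com"), // a verified identity
		Content: &sesv2.EmailContent{
			Simple: &sesv2.Message{
				Subject: &sesv2.Content{Data: aws.String("Inbox placement test")},
				Body:    &sesv2.Body{Text: &sesv2.Content{Data: aws.String("Sample content for the test.")}},
			},
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// The test takes about 24 hours; once the status is COMPLETE, read the results
	// with the GetDeliverabilityTestReport operation.
	fmt.Println("test status:", resp.DeliverabilityTestStatus)
}
```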

    ", + "refs": { + } + }, + "CreateDeliverabilityTestReportResponse": { + "base": "

    Information about the predictive inbox placement test that you created.

    ", + "refs": { + } + }, + "CreateEmailIdentityRequest": { + "base": "

    A request to begin the verification process for an email identity (an email address or domain).

    ", + "refs": { + } + }, + "CreateEmailIdentityResponse": { + "base": "

    If the email identity is a domain, this object contains tokens that you can use to create a set of CNAME records. To successfully verify your domain, you have to add these records to the DNS configuration for your domain.

    If the email identity is an email address, this object is empty.

    ", + "refs": { + } + }, + "CustomRedirectDomain": { + "base": "

    The domain that you want to use for tracking open and click events.

    ", + "refs": { + "PutConfigurationSetTrackingOptionsRequest$CustomRedirectDomain": "

    The domain that you want to use to track open and click events.

    ", + "TrackingOptions$CustomRedirectDomain": "

    The domain that you want to use for tracking open and click events.

    " + } + }, + "DailyVolume": { + "base": "

    An object that contains information about the volume of email sent on each day of the analysis period.

    ", + "refs": { + "DailyVolumes$member": null + } + }, + "DailyVolumes": { + "base": null, + "refs": { + "GetDomainStatisticsReportResponse$DailyVolumes": "

    An object that contains deliverability metrics for the domain that you specified. This object contains data for each day, starting on the StartDate and ending on the EndDate.

    " + } + }, + "DedicatedIp": { + "base": "

    Contains information about a dedicated IP address that is associated with your Amazon SES API v2 account.

    To learn more about requesting dedicated IP addresses, see Requesting and Relinquishing Dedicated IP Addresses in the Amazon SES Developer Guide.

    ", + "refs": { + "DedicatedIpList$member": null, + "GetDedicatedIpResponse$DedicatedIp": "

    An object that contains information about a dedicated IP address.

    " + } + }, + "DedicatedIpList": { + "base": "

    A list of dedicated IP addresses that are associated with your AWS account.

    ", + "refs": { + "GetDedicatedIpsResponse$DedicatedIps": "

    A list of dedicated IP addresses that are associated with your AWS account.

    " + } + }, + "DefaultDimensionValue": { + "base": "

    The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    ", + "refs": { + "CloudWatchDimensionConfiguration$DefaultDimensionValue": "

    The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "DeleteConfigurationSetEventDestinationRequest": { + "base": "

    A request to delete an event destination from a configuration set.

    ", + "refs": { + } + }, + "DeleteConfigurationSetEventDestinationResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "DeleteConfigurationSetRequest": { + "base": "

    A request to delete a configuration set.

    ", + "refs": { + } + }, + "DeleteConfigurationSetResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "DeleteDedicatedIpPoolRequest": { + "base": "

    A request to delete a dedicated IP pool.

    ", + "refs": { + } + }, + "DeleteDedicatedIpPoolResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "DeleteEmailIdentityRequest": { + "base": "

    A request to delete an existing email identity. When you delete an identity, you lose the ability to send email from that identity. You can restore your ability to send email by completing the verification process for the identity again.

    ", + "refs": { + } + }, + "DeleteEmailIdentityResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "DeliverabilityDashboardAccountStatus": { + "base": "

    The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION, your subscription is scheduled to expire at the end of the current calendar month.

    ", + "refs": { + "GetDeliverabilityDashboardOptionsResponse$AccountStatus": "

    The current status of your Deliverability dashboard subscription. If this value is PENDING_EXPIRATION, your subscription is scheduled to expire at the end of the current calendar month.

    " + } + }, + "DeliverabilityTestReport": { + "base": "

    An object that contains metadata related to a predictive inbox placement test.

    ", + "refs": { + "DeliverabilityTestReports$member": null, + "GetDeliverabilityTestReportResponse$DeliverabilityTestReport": "

    An object that contains the results of the predictive inbox placement test.

    " + } + }, + "DeliverabilityTestReports": { + "base": null, + "refs": { + "ListDeliverabilityTestReportsResponse$DeliverabilityTestReports": "

    An object that contains a list of predictive inbox placement tests that you've performed.

    " + } + }, + "DeliverabilityTestStatus": { + "base": "

    The status of a predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    ", + "refs": { + "CreateDeliverabilityTestReportResponse$DeliverabilityTestStatus": "

    The status of the predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    ", + "DeliverabilityTestReport$DeliverabilityTestStatus": "

    The status of the predictive inbox placement test. If the status is IN_PROGRESS, then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is COMPLETE, then the test is finished, and you can use the GetDeliverabilityTestReport operation to view the results of the test.

    " + } + }, + "DeliverabilityTestSubject": { + "base": "

    The subject line for an email that you submitted in a predictive inbox placement test.

    ", + "refs": { + "DeliverabilityTestReport$Subject": "

    The subject line for an email that you submitted in a predictive inbox placement test.

    " + } + }, + "DeliveryOptions": { + "base": "

    Used to associate a configuration set with a dedicated IP pool.

    ", + "refs": { + "CreateConfigurationSetRequest$DeliveryOptions": "

    An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.

    ", + "GetConfigurationSetResponse$DeliveryOptions": "

    An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.

    " + } + }, + "Destination": { + "base": "

    An object that describes the recipients for an email.

    ", + "refs": { + "SendEmailRequest$Destination": "

    An object that contains the recipients of the email message.

    " + } + }, + "DimensionName": { + "base": "

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    ", + "refs": { + "CloudWatchDimensionConfiguration$DimensionName": "

    The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "DimensionValueSource": { + "base": "

    The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.

    ", + "refs": { + "CloudWatchDimensionConfiguration$DimensionValueSource": "

    The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.
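A sketch of the CloudWatch half of an event destination definition that publishes a dimension sourced from a message tag; field names follow the CloudWatchDestination and CloudWatchDimensionConfiguration shapes above, and the DimensionValueSource constant name is an assumption:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	dest := sesv2.EventDestinationDefinition{
		Enabled: aws.Bool(true),
		CloudWatchDestination: &sesv2.CloudWatchDestination{
			DimensionConfigurations: []sesv2.CloudWatchDimensionConfiguration{{
				DimensionName:         aws.String("campaign"),
				DimensionValueSource:  sesv2.DimensionValueSourceMessageTag, // constant name assumed
				DefaultDimensionValue: aws.String("none"),                   // used when no tag value is supplied
			}},
		},
	}
	fmt.Printf("%+v\n", dest)
}
```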

    " + } + }, + "DkimAttributes": { + "base": "

    An object that contains information about the DKIM configuration for an email identity.

    ", + "refs": { + "CreateEmailIdentityResponse$DkimAttributes": "

    An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.

    ", + "GetEmailIdentityResponse$DkimAttributes": "

    An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.

    " + } + }, + "DkimStatus": { + "base": "

    The DKIM authentication status of the identity. The status can be one of the following:

    • PENDING – The DKIM verification process was initiated, and Amazon SES hasn't yet detected the CNAME records in the DNS configuration for the domain.

    • SUCCESS – The DKIM authentication process completed successfully.

    • FAILED – The DKIM authentication process failed. This can happen when Amazon SES fails to find the required CNAME records in the DNS configuration of the domain.

    • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

    • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

    ", + "refs": { + "DkimAttributes$Status": "

    Describes whether or not Amazon SES has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:

    • PENDING – Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.

    • SUCCESS – Amazon SES located the DKIM records in the DNS configuration for the domain and determined that they're correct. You can now send DKIM-signed email from the identity.

    • FAILED – Amazon SES wasn't able to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.

    • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the DKIM status for the domain.

    • NOT_STARTED – Amazon SES hasn't yet started searching for the DKIM records in the DNS configuration for the domain.

    " + } + }, + "DnsToken": { + "base": null, + "refs": { + "DnsTokenList$member": null + } + }, + "DnsTokenList": { + "base": null, + "refs": { + "DkimAttributes$Tokens": "

    A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon SES usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.
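A sketch of creating a domain identity and printing the DKIM tokens that have to become CNAME records; field names follow the CreateEmailIdentityRequest and DkimAttributes shapes, and the domain is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sesv2.New(cfg)

	resp, err := svc.CreateEmailIdentityRequest(&sesv2.CreateEmailIdentityInput{
		EmailIdentity: aws.String("example.com"), // placeholder domain
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.DkimAttributes != nil {
		for _, token := range resp.DkimAttributes.Tokens {
			// Each token has to be published as a CNAME record in the domain's DNS configuration.
			fmt.Println("DKIM token:", token)
		}
	}
}
```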

    " + } + }, + "Domain": { + "base": null, + "refs": { + "DomainDeliverabilityTrackingOption$Domain": "

    A verified domain that’s associated with your AWS account and currently has an active Deliverability dashboard subscription.

    ", + "ListDomainDeliverabilityCampaignsRequest$SubscribedDomain": "

    The domain to obtain deliverability data for.

    " + } + }, + "DomainDeliverabilityCampaign": { + "base": "

    An object that contains the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption operation).

    ", + "refs": { + "DomainDeliverabilityCampaignList$member": null, + "GetDomainDeliverabilityCampaignResponse$DomainDeliverabilityCampaign": "

    An object that contains the deliverability data for the campaign.

    " + } + }, + "DomainDeliverabilityCampaignList": { + "base": "

    ", + "refs": { + "ListDomainDeliverabilityCampaignsResponse$DomainDeliverabilityCampaigns": "

    An array of responses, one for each campaign that used the domain to send email during the specified time range.

    " + } + }, + "DomainDeliverabilityTrackingOption": { + "base": "

    An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.

    ", + "refs": { + "DomainDeliverabilityTrackingOptions$member": null + } + }, + "DomainDeliverabilityTrackingOptions": { + "base": "

    An object that contains information about the Deliverability dashboard subscription for a verified domain that you use to send email and currently has an active Deliverability dashboard subscription. If a Deliverability dashboard subscription is active for a domain, you gain access to reputation, inbox placement, and other metrics for the domain.

    ", + "refs": { + "GetDeliverabilityDashboardOptionsResponse$ActiveSubscribedDomains": "

    An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that isn’t scheduled to expire at the end of the current calendar month.

    ", + "GetDeliverabilityDashboardOptionsResponse$PendingExpirationSubscribedDomains": "

    An array of objects, one for each verified domain that you use to send email and currently has an active Deliverability dashboard subscription that's scheduled to expire at the end of the current calendar month.

    ", + "PutDeliverabilityDashboardOptionRequest$SubscribedDomains": "

    An array of objects, one for each verified domain that you use to send email and that you've enabled the Deliverability dashboard for.

    " + } + }, + "DomainIspPlacement": { + "base": "

    An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.

    ", + "refs": { + "DomainIspPlacements$member": null + } + }, + "DomainIspPlacements": { + "base": null, + "refs": { + "DailyVolume$DomainIspPlacements": "

    An object that contains inbox placement metrics for a specified day in the analysis period, broken out by the recipient's email provider.

    ", + "OverallVolume$DomainIspPlacements": "

    An object that contains inbox and junk mail placement metrics for individual email providers.

    " + } + }, + "EmailAddress": { + "base": null, + "refs": { + "CreateDeliverabilityTestReportRequest$FromEmailAddress": "

    The email address that the predictive inbox placement test email was sent from.

    ", + "DeliverabilityTestReport$FromEmailAddress": "

    The sender address that you specified for the predictive inbox placement test.

    ", + "EmailAddressList$member": null, + "SendEmailRequest$FromEmailAddress": "

    The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.

    ", + "SendEmailRequest$FeedbackForwardingEmailAddress": "

    The address that you want bounce and complaint notifications to be sent to.

    " + } + }, + "EmailAddressList": { + "base": null, + "refs": { + "Destination$ToAddresses": "

    An array that contains the email addresses of the \"To\" recipients for the email.

    ", + "Destination$CcAddresses": "

    An array that contains the email addresses of the \"CC\" (carbon copy) recipients for the email.

    ", + "Destination$BccAddresses": "

    An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.

    ", + "SendEmailRequest$ReplyToAddresses": "

    The \"Reply-to\" email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.

    " + } + }, + "EmailContent": { + "base": "

    An object that defines the entire content of the email, including the message headers and the body content. You can create a simple email message, in which you specify the subject and the text and HTML versions of the message body. You can also create raw messages, in which you specify a complete MIME-formatted message. Raw messages can include attachments and custom headers.
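A sketch of the raw alternative to the Simple message used in the earlier SendEmail example; the EmailContent$Raw and RawMessage$Data field names are assumptions, and the MIME text is a minimal placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	// A complete, valid MIME document; attachments would be added as additional
	// multipart sections.
	mime := "From: sender@example.com\r\n" +
		"To: recipient@example.com\r\n" +
		"Subject: Hello\r\n" +
		"MIME-Version: 1.0\r\n" +
		"Content-Type: text/plain; charset=UTF-8\r\n" +
		"\r\n" +
		"Hello from a raw MIME message.\r\n"

	content := &sesv2.EmailContent{
		Raw: &sesv2.RawMessage{Data: []byte(mime)},
	}
	fmt.Println(len(content.Raw.Data), "bytes of raw MIME content")
}
```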

    ", + "refs": { + "CreateDeliverabilityTestReportRequest$Content": "

    The HTML body of the message that you sent when you performed the predictive inbox placement test.

    ", + "SendEmailRequest$Content": "

    An object that contains the body of the message. You can send either a Simple message or a Raw message.

    " + } + }, + "Enabled": { + "base": null, + "refs": { + "CreateEmailIdentityResponse$VerifiedForSendingStatus": "

    Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon Pinpoint User Guide.

    ", + "DkimAttributes$SigningEnabled": "

    If the value is true, then the messages that you send from the identity are signed using DKIM. If the value is false, then the messages that you send from the identity aren't DKIM-signed.

    ", + "EventDestination$Enabled": "

    If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition.

    If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.

    ", + "EventDestinationDefinition$Enabled": "

    If true, the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this EventDestinationDefinition.

    If false, the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.

    ", + "GetAccountResponse$SendingEnabled": "

    Indicates whether or not email sending is enabled for your Amazon SES account in the current AWS Region.

    ", + "GetAccountResponse$DedicatedIpAutoWarmupEnabled": "

    Indicates whether or not the automatic warm-up feature is enabled for dedicated IP addresses that are associated with your account.

    ", + "GetAccountResponse$ProductionAccessEnabled": "

    Indicates whether or not your account has production access in the current AWS Region.

    If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

    If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

    ", + "GetDeliverabilityDashboardOptionsResponse$DashboardEnabled": "

    Specifies whether the Deliverability dashboard is enabled. If this value is true, the dashboard is enabled.

    ", + "GetEmailIdentityResponse$FeedbackForwardingStatus": "

    The feedback forwarding configuration for the identity.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    ", + "GetEmailIdentityResponse$VerifiedForSendingStatus": "

    Specifies whether or not the identity is verified. You can only send email from verified email addresses or domains. For more information about verifying identities, see the Amazon Pinpoint User Guide.

    ", + "IdentityInfo$SendingEnabled": "

    Indicates whether or not you can send email from the identity.

    An identity is an email address or domain that you send email from. Before you can send email from an identity, you have to demonstrate that you own the identity, and that you authorize Amazon SES to send email from that identity.

    ", + "InboxPlacementTrackingOption$Global": "

    Specifies whether inbox placement data is being tracked for the domain.

    ", + "PutAccountDedicatedIpWarmupAttributesRequest$AutoWarmupEnabled": "

    Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon SES account in the current AWS Region. Set to true to enable the automatic warm-up feature, or set to false to disable it.

    ", + "PutAccountSendingAttributesRequest$SendingEnabled": "

    Enables or disables your account's ability to send email. Set to true to enable email sending, or set to false to disable email sending.

    If AWS paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.

    ", + "PutConfigurationSetReputationOptionsRequest$ReputationMetricsEnabled": "

    If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set.

    ", + "PutConfigurationSetSendingOptionsRequest$SendingEnabled": "

    If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set.

    ", + "PutDeliverabilityDashboardOptionRequest$DashboardEnabled": "

    Specifies whether to enable the Deliverability dashboard. To enable the dashboard, set this value to true.

    ", + "PutEmailIdentityDkimAttributesRequest$SigningEnabled": "

    Sets the DKIM signing configuration for the identity.

    When you set this value to true, the messages that you send from the identity are signed using DKIM. If you set this value to false, your messages are sent without DKIM signing.

    ", + "PutEmailIdentityFeedbackAttributesRequest$EmailForwardingEnabled": "

    Sets the feedback forwarding configuration for the identity.

    If the value is true, you receive email notifications when bounce or complaint events occur. These notifications are sent to the address that you specified in the Return-Path header of the original email.

    You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications (for example, by setting up an event destination), you receive an email notification when these events occur (even if this setting is disabled).

    ", + "ReputationOptions$ReputationMetricsEnabled": "

    If true, tracking of reputation metrics is enabled for the configuration set. If false, tracking of reputation metrics is disabled for the configuration set.

    ", + "SendingOptions$SendingEnabled": "

    If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set.

    " + } + }, + "Esp": { + "base": null, + "refs": { + "Esps$member": null + } + }, + "Esps": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$Esps": "

    The major email providers who handled the email message.

    " + } + }, + "EventDestination": { + "base": "

    In the Amazon SES API v2, events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    ", + "refs": { + "EventDestinations$member": null + } + }, + "EventDestinationDefinition": { + "base": "

    An object that defines the event destination. Specifically, it defines which services receive events from emails sent using the configuration set that the event destination is associated with. Also defines the types of events that are sent to the event destination.

    ", + "refs": { + "CreateConfigurationSetEventDestinationRequest$EventDestination": "

    An object that defines the event destination.

    ", + "UpdateConfigurationSetEventDestinationRequest$EventDestination": "

    An object that defines the event destination.

    " + } + }, + "EventDestinationName": { + "base": "

    The name of an event destination.

    Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.

    ", + "refs": { + "CreateConfigurationSetEventDestinationRequest$EventDestinationName": "

    A name that identifies the event destination within the configuration set.

    ", + "DeleteConfigurationSetEventDestinationRequest$EventDestinationName": "

    The name of the event destination that you want to delete.

    ", + "EventDestination$Name": "

    A name that identifies the event destination.

    ", + "UpdateConfigurationSetEventDestinationRequest$EventDestinationName": "

    The name of the event destination that you want to modify.

    " + } + }, + "EventDestinations": { + "base": null, + "refs": { + "GetConfigurationSetEventDestinationsResponse$EventDestinations": "

    An array that includes all of the event destinations that have been configured for the configuration set.

    " + } + }, + "EventType": { + "base": "

    An email sending event type. For example, email sends, opens, and bounces are all email events.

    ", + "refs": { + "EventTypes$member": null + } + }, + "EventTypes": { + "base": null, + "refs": { + "EventDestination$MatchingEventTypes": "

    The types of events that Amazon SES sends to the specified event destinations.

    ", + "EventDestinationDefinition$MatchingEventTypes": "

    An array that specifies which events the Amazon SES API v2 should send to the destinations in this EventDestinationDefinition.

    " + } + }, + "GeneralEnforcementStatus": { + "base": null, + "refs": { + "GetAccountResponse$EnforcementStatus": "

    The reputation status of your Amazon SES account. The status can be one of the following:

    • HEALTHY – There are no reputation-related issues that currently impact your account.

    • PROBATION – We've identified potential issues with your Amazon SES account. We're placing your account under review while you work on correcting these issues.

    • SHUTDOWN – Your account's ability to send email is currently paused because of an issue with the email sent from your account. When you correct the issue, you can contact us and request that your account's ability to send email is resumed.

    " + } + }, + "GetAccountRequest": { + "base": "

    A request to obtain information about the email-sending capabilities of your Amazon SES account.

    ", + "refs": { + } + }, + "GetAccountResponse": { + "base": "

    A list of details about the email-sending capabilities of your Amazon SES account in the current AWS Region.
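A sketch of reading these account details; the documented fields (SendingEnabled, ProductionAccessEnabled, EnforcementStatus) are used directly, while the SendQuota member on the response is an assumption:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sesv2.New(cfg)

	resp, err := svc.GetAccountRequest(&sesv2.GetAccountInput{}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.ProductionAccessEnabled != nil && !*resp.ProductionAccessEnabled {
		fmt.Println("account is still in the sandbox")
	}
	if resp.EnforcementStatus != nil {
		fmt.Println("enforcement status:", *resp.EnforcementStatus)
	}
	if resp.SendQuota != nil && resp.SendQuota.Max24HourSend != nil {
		fmt.Println("24-hour sending quota:", *resp.SendQuota.Max24HourSend)
	}
}
```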

    ", + "refs": { + } + }, + "GetBlacklistReportsRequest": { + "base": "

    A request to retrieve a list of the blacklists that your dedicated IP addresses appear on.

    ", + "refs": { + } + }, + "GetBlacklistReportsResponse": { + "base": "

    An object that contains information about blacklist events.

    ", + "refs": { + } + }, + "GetConfigurationSetEventDestinationsRequest": { + "base": "

    A request to obtain information about the event destinations for a configuration set.

    ", + "refs": { + } + }, + "GetConfigurationSetEventDestinationsResponse": { + "base": "

    Information about an event destination for a configuration set.

    ", + "refs": { + } + }, + "GetConfigurationSetRequest": { + "base": "

    A request to obtain information about a configuration set.

    ", + "refs": { + } + }, + "GetConfigurationSetResponse": { + "base": "

    Information about a configuration set.

    ", + "refs": { + } + }, + "GetDedicatedIpRequest": { + "base": "

    A request to obtain more information about a dedicated IP address.

    ", + "refs": { + } + }, + "GetDedicatedIpResponse": { + "base": "

    Information about a dedicated IP address.

    ", + "refs": { + } + }, + "GetDedicatedIpsRequest": { + "base": "

    A request to obtain more information about dedicated IP pools.

    ", + "refs": { + } + }, + "GetDedicatedIpsResponse": { + "base": "

    Information about the dedicated IP addresses that are associated with your AWS account.

    ", + "refs": { + } + }, + "GetDeliverabilityDashboardOptionsRequest": { + "base": "

    Retrieve information about the status of the Deliverability dashboard for your AWS account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.
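A sketch of checking the subscription state; field names follow the GetDeliverabilityDashboardOptionsResponse and DomainDeliverabilityTrackingOption shapes in this file:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := sesv2.New(cfg)

	resp, err := svc.GetDeliverabilityDashboardOptionsRequest(&sesv2.GetDeliverabilityDashboardOptionsInput{}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	if resp.DashboardEnabled == nil || !*resp.DashboardEnabled {
		fmt.Println("Deliverability dashboard is not enabled")
		return
	}
	fmt.Println("subscription status:", resp.AccountStatus)
	for _, d := range resp.ActiveSubscribedDomains {
		if d.Domain != nil {
			fmt.Println("tracked domain:", *d.Domain)
		}
	}
}
```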

    ", + "refs": { + } + }, + "GetDeliverabilityDashboardOptionsResponse": { + "base": "

    An object that shows the status of the Deliverability dashboard.

    ", + "refs": { + } + }, + "GetDeliverabilityTestReportRequest": { + "base": "

    A request to retrieve the results of a predictive inbox placement test.

    ", + "refs": { + } + }, + "GetDeliverabilityTestReportResponse": { + "base": "

    The results of the predictive inbox placement test.

    ", + "refs": { + } + }, + "GetDomainDeliverabilityCampaignRequest": { + "base": "

    Retrieve all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption operation).

    ", + "refs": { + } + }, + "GetDomainDeliverabilityCampaignResponse": { + "base": "

    An object that contains all the deliverability data for a specific campaign. This data is available for a campaign only if the campaign sent email by using a domain that the Deliverability dashboard is enabled for.

    ", + "refs": { + } + }, + "GetDomainStatisticsReportRequest": { + "base": "

    A request to obtain deliverability metrics for a domain.

    ", + "refs": { + } + }, + "GetDomainStatisticsReportResponse": { + "base": "

    An object that includes statistics that are related to the domain that you specified.

    ", + "refs": { + } + }, + "GetEmailIdentityRequest": { + "base": "

    A request to return details about an email identity.

    ", + "refs": { + } + }, + "GetEmailIdentityResponse": { + "base": "

    Details about an email identity.

    ", + "refs": { + } + }, + "Identity": { + "base": null, + "refs": { + "CreateEmailIdentityRequest$EmailIdentity": "

    The email address or domain that you want to verify.

    ", + "DeleteEmailIdentityRequest$EmailIdentity": "

    The identity (that is, the email address or domain) that you want to delete.

    ", + "DomainDeliverabilityCampaign$FromAddress": "

    The verified email address that the email message was sent from.

    ", + "GetDomainStatisticsReportRequest$Domain": "

    The domain that you want to obtain deliverability metrics for.

    ", + "GetEmailIdentityRequest$EmailIdentity": "

    The email identity that you want to retrieve details for.

    ", + "IdentityInfo$IdentityName": "

    The address or domain of the identity.

    ", + "PutEmailIdentityDkimAttributesRequest$EmailIdentity": "

    The email identity that you want to change the DKIM settings for.

    ", + "PutEmailIdentityFeedbackAttributesRequest$EmailIdentity": "

    The email identity that you want to configure bounce and complaint feedback forwarding for.

    ", + "PutEmailIdentityMailFromAttributesRequest$EmailIdentity": "

    The verified email identity that you want to set up the custom MAIL FROM domain for.

    " + } + }, + "IdentityInfo": { + "base": "

    Information about an email identity.

    ", + "refs": { + "IdentityInfoList$member": null + } + }, + "IdentityInfoList": { + "base": null, + "refs": { + "ListEmailIdentitiesResponse$EmailIdentities": "

    An array that includes all of the email identities associated with your AWS account.

    " + } + }, + "IdentityType": { + "base": "

    The email identity type. The identity type can be one of the following:

    • EMAIL_ADDRESS – The identity is an email address.

    • DOMAIN – The identity is a domain.

    ", + "refs": { + "CreateEmailIdentityResponse$IdentityType": "

    The email identity type.

    ", + "GetEmailIdentityResponse$IdentityType": "

    The email identity type.

    ", + "IdentityInfo$IdentityType": "

    The email identity type. The identity type can be one of the following:

    • EMAIL_ADDRESS – The identity is an email address.

    • DOMAIN – The identity is a domain.

    • MANAGED_DOMAIN – The identity is a domain that is managed by AWS.

    " + } + }, + "ImageUrl": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$ImageUrl": "

    The URL of an image that contains a snapshot of the email message that was sent.

    " + } + }, + "InboxPlacementTrackingOption": { + "base": "

    An object that contains information about the inbox placement data settings for a verified domain that’s associated with your AWS account. This data is available only if you enabled the Deliverability dashboard for the domain.

    ", + "refs": { + "DomainDeliverabilityTrackingOption$InboxPlacementTrackingOption": "

    An object that contains information about the inbox placement data settings for the domain.

    " + } + }, + "Ip": { + "base": "

    An IPv4 address.

    ", + "refs": { + "DedicatedIp$Ip": "

    An IPv4 address.

    ", + "GetDedicatedIpRequest$Ip": "

    The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's associated with your AWS account.

    ", + "IpList$member": null, + "PutDedicatedIpInPoolRequest$Ip": "

    The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your AWS account.

    ", + "PutDedicatedIpWarmupAttributesRequest$Ip": "

    The dedicated IP address that you want to update the warm-up attributes for.

    " + } + }, + "IpList": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$SendingIps": "

    The IP addresses that were used to send the email message.

    " + } + }, + "IspName": { + "base": "

    The name of an email provider.

    ", + "refs": { + "DomainIspPlacement$IspName": "

    The name of the email provider that the inbox placement data applies to.

    ", + "IspNameList$member": null, + "IspPlacement$IspName": "

    The name of the email provider that the inbox placement data applies to.

    " + } + }, + "IspNameList": { + "base": null, + "refs": { + "InboxPlacementTrackingOption$TrackedIsps": "

    An array of strings, one for each major email provider that the inbox placement data applies to.

    " + } + }, + "IspPlacement": { + "base": "

    An object that describes how email sent during the predictive inbox placement test was handled by a certain email provider.

    ", + "refs": { + "IspPlacements$member": null + } + }, + "IspPlacements": { + "base": null, + "refs": { + "GetDeliverabilityTestReportResponse$IspPlacements": "

    An object that describes how the test email was handled by several email providers, including Gmail, Hotmail, Yahoo, AOL, and others.

    " + } + }, + "KinesisFirehoseDestination": { + "base": "

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    ", + "refs": { + "EventDestination$KinesisFirehoseDestination": "

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    ", + "EventDestinationDefinition$KinesisFirehoseDestination": "

    An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.

    " + } + }, + "LastFreshStart": { + "base": "

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    ", + "refs": { + "ReputationOptions$LastFreshStart": "

    The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

    " + } + }, + "LimitExceededException": { + "base": "

    There are too many instances of the specified resource type.

    ", + "refs": { + } + }, + "ListConfigurationSetsRequest": { + "base": "

    A request to obtain a list of configuration sets for your Amazon SES account in the current AWS Region.

    ", + "refs": { + } + }, + "ListConfigurationSetsResponse": { + "base": "

    A list of configuration sets in your Amazon SES account in the current AWS Region.

    ", + "refs": { + } + }, + "ListDedicatedIpPoolsRequest": { + "base": "

    A request to obtain a list of dedicated IP pools.

    ", + "refs": { + } + }, + "ListDedicatedIpPoolsResponse": { + "base": "

    A list of dedicated IP pools.

    ", + "refs": { + } + }, + "ListDeliverabilityTestReportsRequest": { + "base": "

    A request to list all of the predictive inbox placement tests that you've performed.

    ", + "refs": { + } + }, + "ListDeliverabilityTestReportsResponse": { + "base": "

    A list of the predictive inbox placement test reports that are available for your account, regardless of whether or not those tests are complete.

    ", + "refs": { + } + }, + "ListDomainDeliverabilityCampaignsRequest": { + "base": "

    Retrieve deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard.

    ", + "refs": { + } + }, + "ListDomainDeliverabilityCampaignsResponse": { + "base": "

    An array of objects that provide deliverability data for all the campaigns that used a specific domain to send email during a specified time range. This data is available for a domain only if you enabled the Deliverability dashboard for the domain.

    ", + "refs": { + } + }, + "ListEmailIdentitiesRequest": { + "base": "

    A request to list all of the email identities associated with your AWS account. This list includes identities that you've already verified, identities that are unverified, and identities that were verified in the past, but are no longer verified.

    ", + "refs": { + } + }, + "ListEmailIdentitiesResponse": { + "base": "

    A list of all of the identities that you've attempted to verify, regardless of whether or not those identities were successfully verified.

    ", + "refs": { + } + }, + "ListOfDedicatedIpPools": { + "base": "

    A list of dedicated IP pools that are associated with your AWS account.

    ", + "refs": { + "ListDedicatedIpPoolsResponse$DedicatedIpPools": "

    A list of all of the dedicated IP pools that are associated with your AWS account in the current Region.

    " + } + }, + "ListTagsForResourceRequest": { + "base": null, + "refs": { + } + }, + "ListTagsForResourceResponse": { + "base": null, + "refs": { + } + }, + "MailFromAttributes": { + "base": "

    A list of attributes that are associated with a MAIL FROM domain.

    ", + "refs": { + "GetEmailIdentityResponse$MailFromAttributes": "

    An object that contains information about the Mail-From attributes for the email identity.

    " + } + }, + "MailFromDomainName": { + "base": "

    The domain that you want to use as a MAIL FROM domain.

    ", + "refs": { + "MailFromAttributes$MailFromDomain": "

    The name of a domain that an email identity uses as a custom MAIL FROM domain.

    ", + "PutEmailIdentityMailFromAttributesRequest$MailFromDomain": "

    The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:

    • It has to be a subdomain of the verified identity.

    • It can't be used to receive email.

    • It can't be used in a \"From\" address if the MAIL FROM domain is a destination for feedback forwarding emails.

    " + } + }, + "MailFromDomainNotVerifiedException": { + "base": "

    The message can't be sent because the sending domain isn't verified.

    ", + "refs": { + } + }, + "MailFromDomainStatus": { + "base": "

    The status of the MAIL FROM domain. This status can have the following values:

    • PENDING – Amazon SES hasn't started searching for the MX record yet.

    • SUCCESS – Amazon SES detected the required MX record for the MAIL FROM domain.

    • FAILED – Amazon SES can't find the required MX record, or the record no longer exists.

    • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.

    ", + "refs": { + "MailFromAttributes$MailFromDomainStatus": "

    The status of the MAIL FROM domain. This status can have the following values:

    • PENDING – Amazon SES hasn't started searching for the MX record yet.

    • SUCCESS – Amazon SES detected the required MX record for the MAIL FROM domain.

    • FAILED – Amazon SES can't find the required MX record, or the record no longer exists.

    • TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon SES from determining the status of the MAIL FROM domain.

    " + } + }, + "Max24HourSend": { + "base": null, + "refs": { + "SendQuota$Max24HourSend": "

    The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your sending quota.

    " + } + }, + "MaxItems": { + "base": null, + "refs": { + "GetDedicatedIpsRequest$PageSize": "

    The number of results to show in a single call to GetDedicatedIps. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "ListConfigurationSetsRequest$PageSize": "

    The number of results to show in a single call to ListConfigurationSets. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "ListDedicatedIpPoolsRequest$PageSize": "

    The number of results to show in a single call to ListDedicatedIpPools. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    ", + "ListDeliverabilityTestReportsRequest$PageSize": "

    The number of results to show in a single call to ListDeliverabilityTestReports. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 0, and can be no more than 1000.

    ", + "ListDomainDeliverabilityCampaignsRequest$PageSize": "

    The maximum number of results to include in response to a single call to the ListDomainDeliverabilityCampaigns operation. If the number of results is larger than the number that you specify in this parameter, the response includes a NextToken element, which you can use to obtain additional results.

    ", + "ListEmailIdentitiesRequest$PageSize": "

    The number of results to show in a single call to ListEmailIdentities. If the number of results is larger than the number you specified in this parameter, then the response includes a NextToken element, which you can use to obtain additional results.

    The value you specify has to be at least 0, and can be no more than 1000.

    " + } + }, + "MaxSendRate": { + "base": null, + "refs": { + "SendQuota$MaxSendRate": "

    The maximum number of emails that you can send per second in the current AWS Region. This value is also called your maximum sending rate or your maximum TPS (transactions per second) rate.

    " + } + }, + "Message": { + "base": "

    Represents the email message that you're sending. The Message object consists of a subject line and a message body.

    ", + "refs": { + "EmailContent$Simple": "

    The simple email message. The message consists of a subject and a message body.

    " + } + }, + "MessageContent": { + "base": "

    The body of an email message.

    ", + "refs": { + "GetDeliverabilityTestReportResponse$Message": "

    An object that contains the message that you sent when you performed this predictive inbox placement test.

    " + } + }, + "MessageData": { + "base": null, + "refs": { + "Content$Data": "

    The content of the message itself.

    " + } + }, + "MessageRejected": { + "base": "

    The message can't be sent because it contains invalid content.

    ", + "refs": { + } + }, + "MessageTag": { + "base": "

    Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.

    ", + "refs": { + "MessageTagList$member": null + } + }, + "MessageTagList": { + "base": "

    A list of message tags.

    ", + "refs": { + "SendEmailRequest$EmailTags": "

    A list of tags, in the form of name/value pairs, to apply to an email that you send using the SendEmail operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
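As a sketch of how EmailTags fit into a simple send, reusing the client construction from the MAIL FROM example above; this is an excerpt (it also needs the fmt import), and the FromEmailAddress/Destination field names, the addresses, and the tag values are illustrative assumptions rather than values taken from this model excerpt.

```go
// client := sesv2.New(cfg), as in the earlier sketch.
req := client.SendEmailRequest(&sesv2.SendEmailInput{
	FromEmailAddress: aws.String("sender@example.com"), // hypothetical verified sender
	Destination: &sesv2.Destination{
		ToAddresses: []string{"recipient@example.com"},
	},
	Content: &sesv2.EmailContent{
		Simple: &sesv2.Message{
			Subject: &sesv2.Content{Data: aws.String("Hello from the SES API v2")},
			Body: &sesv2.Body{
				Text: &sesv2.Content{Data: aws.String("Plain-text body.")},
			},
		},
	},
	// Tags correspond to characteristics of the email that you define.
	EmailTags: []sesv2.MessageTag{
		{Name: aws.String("campaign"), Value: aws.String("launch")},
	},
})
resp, err := req.Send(context.TODO())
if err != nil {
	panic(err)
}
if resp.MessageId != nil {
	fmt.Println("accepted:", *resp.MessageId) // see OutboundMessageId below
}
```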

    " + } + }, + "MessageTagName": { + "base": "

    The name of the message tag. The message tag name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    ", + "refs": { + "MessageTag$Name": "

    The name of the message tag. The message tag name has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "MessageTagValue": { + "base": "

    The value of the message tag. The message tag value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    ", + "refs": { + "MessageTag$Value": "

    The value of the message tag. The message tag value has to meet the following criteria:

    • It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).

    • It can contain no more than 256 characters.

    " + } + }, + "NextToken": { + "base": null, + "refs": { + "GetDedicatedIpsRequest$NextToken": "

    A token returned from a previous call to GetDedicatedIps to indicate the position of the dedicated IP pool in the list of IP pools.

    ", + "GetDedicatedIpsResponse$NextToken": "

    A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to GetDedicatedIps, passing this token in the NextToken parameter.

    ", + "ListConfigurationSetsRequest$NextToken": "

    A token returned from a previous call to ListConfigurationSets to indicate the position in the list of configuration sets.

    ", + "ListConfigurationSetsResponse$NextToken": "

    A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ListConfigurationSets, and pass this token in the NextToken parameter.

    ", + "ListDedicatedIpPoolsRequest$NextToken": "

    A token returned from a previous call to ListDedicatedIpPools to indicate the position in the list of dedicated IP pools.

    ", + "ListDedicatedIpPoolsResponse$NextToken": "

    A token that indicates that there are additional IP pools to list. To view additional IP pools, issue another request to ListDedicatedIpPools, passing this token in the NextToken parameter.

    ", + "ListDeliverabilityTestReportsRequest$NextToken": "

    A token returned from a previous call to ListDeliverabilityTestReports to indicate the position in the list of predictive inbox placement tests.

    ", + "ListDeliverabilityTestReportsResponse$NextToken": "

    A token that indicates that there are additional predictive inbox placement tests to list. To view additional predictive inbox placement tests, issue another request to ListDeliverabilityTestReports, and pass this token in the NextToken parameter.

    ", + "ListDomainDeliverabilityCampaignsRequest$NextToken": "

    A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns operation. This token indicates the position of a campaign in the list of campaigns.

    ", + "ListDomainDeliverabilityCampaignsResponse$NextToken": "

    A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns operation. This token indicates the position of the campaign in the list of campaigns.

    ", + "ListEmailIdentitiesRequest$NextToken": "

    A token returned from a previous call to ListEmailIdentities to indicate the position in the list of identities.

    ", + "ListEmailIdentitiesResponse$NextToken": "

    A token that indicates that there are additional email identities to list. To view additional email identities, issue another request to ListEmailIdentities, and pass this token in the NextToken parameter.
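The NextToken/PageSize pair drives manual pagination; the paginators-1.json file added later in this diff wires the same fields into the SDK's paginator support. A hedged sketch of a manual paging loop, reusing the client from the first example; the EmailIdentities and IdentityName field names are assumptions based on this model rather than a verified build.

```go
// Page through every email identity in the account.
var nextToken *string
for {
	req := client.ListEmailIdentitiesRequest(&sesv2.ListEmailIdentitiesInput{
		PageSize:  aws.Int64(100), // at least 0, no more than 1000
		NextToken: nextToken,
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	for _, id := range resp.EmailIdentities {
		if id.IdentityName != nil {
			fmt.Println(*id.IdentityName)
		}
	}
	if resp.NextToken == nil {
		break // no more identities to list
	}
	nextToken = resp.NextToken
}
```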

    " + } + }, + "NotFoundException": { + "base": "

    The resource you attempted to access doesn't exist.

    ", + "refs": { + } + }, + "OutboundMessageId": { + "base": null, + "refs": { + "SendEmailResponse$MessageId": "

    A unique identifier for the message that is generated when the message is accepted.

    It is possible for the Amazon SES API v2 to accept a message without sending it. This can happen when the message that you're trying to send has an attachment that contains a virus, or when you send a templated email that contains invalid personalization content, for example.

    " + } + }, + "OverallVolume": { + "base": "

    An object that contains information about email that was sent from the selected domain.

    ", + "refs": { + "GetDomainStatisticsReportResponse$OverallVolume": "

    An object that contains deliverability metrics for the domain that you specified. The data in this object is a summary of all of the data that was collected from the StartDate to the EndDate.

    " + } + }, + "Percentage": { + "base": "

    An object that contains information about inbox placement percentages.

    ", + "refs": { + "DomainDeliverabilityCampaign$ReadRate": "

    The percentage of email messages that were opened by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.

    ", + "DomainDeliverabilityCampaign$DeleteRate": "

    The percentage of email messages that were deleted by recipients, without being opened first. Due to technical limitations, this value only includes recipients who used an email client that supports images.

    ", + "DomainDeliverabilityCampaign$ReadDeleteRate": "

    The percentage of email messages that were opened and then deleted by recipients. Due to technical limitations, this value only includes recipients who opened the message by using an email client that supports images.

    ", + "DomainIspPlacement$InboxPercentage": "

    The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.

    ", + "DomainIspPlacement$SpamPercentage": "

    The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.

    ", + "OverallVolume$ReadRatePercent": "

    The percentage of emails that were sent from the domain that were read by their recipients.

    ", + "PlacementStatistics$InboxPercentage": "

    The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.

    ", + "PlacementStatistics$SpamPercentage": "

    The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.

    ", + "PlacementStatistics$MissingPercentage": "

    The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.

    ", + "PlacementStatistics$SpfPercentage": "

    The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.

    ", + "PlacementStatistics$DkimPercentage": "

    The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.

    " + } + }, + "Percentage100Wrapper": { + "base": null, + "refs": { + "DedicatedIp$WarmupPercentage": "

    Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.

    ", + "PutDedicatedIpWarmupAttributesRequest$WarmupPercentage": "

    The warm-up percentage that you want to associate with the dedicated IP address.
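For example, resuming warm-up at the halfway point might look like the following sketch; the Ip field name and the address are assumptions, and the client setup is the same as in the first example.

```go
// Resume the warm-up process for an existing dedicated IP address at 50%.
req := client.PutDedicatedIpWarmupAttributesRequest(&sesv2.PutDedicatedIpWarmupAttributesInput{
	Ip:               aws.String("203.0.113.25"), // hypothetical dedicated IP address
	WarmupPercentage: aws.Int64(50),
})
if _, err := req.Send(context.TODO()); err != nil {
	panic(err)
}
```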

    " + } + }, + "PinpointDestination": { + "base": "

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    ", + "refs": { + "EventDestination$PinpointDestination": "

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    ", + "EventDestinationDefinition$PinpointDestination": "

    An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to an Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

    " + } + }, + "PlacementStatistics": { + "base": "

    An object that contains inbox placement data for an email provider.

    ", + "refs": { + "GetDeliverabilityTestReportResponse$OverallPlacement": "

    An object that specifies how many of the test messages sent during the predictive inbox placement test were delivered to recipients' inboxes, how many were sent to recipients' spam folders, and how many weren't delivered.

    ", + "IspPlacement$PlacementStatistics": "

    An object that contains inbox placement metrics for a specific email provider.

    " + } + }, + "PoolName": { + "base": "

    The name of a dedicated IP pool.

    ", + "refs": { + "CreateDedicatedIpPoolRequest$PoolName": "

    The name of the dedicated IP pool.

    ", + "DedicatedIp$PoolName": "

    The name of the dedicated IP pool that the IP address is associated with.

    ", + "DeleteDedicatedIpPoolRequest$PoolName": "

    The name of the dedicated IP pool that you want to delete.

    ", + "DeliveryOptions$SendingPoolName": "

    The name of the dedicated IP pool that you want to associate with the configuration set.

    ", + "GetDedicatedIpsRequest$PoolName": "

    The name of the IP pool that the dedicated IP address is associated with.

    ", + "ListOfDedicatedIpPools$member": null, + "PutDedicatedIpInPoolRequest$DestinationPoolName": "

    The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.

    " + } + }, + "PutAccountDedicatedIpWarmupAttributesRequest": { + "base": "

    A request to enable or disable the automatic IP address warm-up feature.

    ", + "refs": { + } + }, + "PutAccountDedicatedIpWarmupAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutAccountSendingAttributesRequest": { + "base": "

    A request to change the ability of your account to send email.

    ", + "refs": { + } + }, + "PutAccountSendingAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutConfigurationSetDeliveryOptionsRequest": { + "base": "

    A request to associate a configuration set with a dedicated IP pool.

    ", + "refs": { + } + }, + "PutConfigurationSetDeliveryOptionsResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutConfigurationSetReputationOptionsRequest": { + "base": "

    A request to enable or disable tracking of reputation metrics for a configuration set.

    ", + "refs": { + } + }, + "PutConfigurationSetReputationOptionsResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutConfigurationSetSendingOptionsRequest": { + "base": "

    A request to enable or disable the ability of Amazon SES to send emails that use a specific configuration set.

    ", + "refs": { + } + }, + "PutConfigurationSetSendingOptionsResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutConfigurationSetTrackingOptionsRequest": { + "base": "

    A request to add a custom domain for tracking open and click events to a configuration set.

    ", + "refs": { + } + }, + "PutConfigurationSetTrackingOptionsResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutDedicatedIpInPoolRequest": { + "base": "

    A request to move a dedicated IP address to a dedicated IP pool.

    ", + "refs": { + } + }, + "PutDedicatedIpInPoolResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutDedicatedIpWarmupAttributesRequest": { + "base": "

    A request to change the warm-up attributes for a dedicated IP address. This operation is useful when you want to resume the warm-up process for an existing IP address.

    ", + "refs": { + } + }, + "PutDedicatedIpWarmupAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutDeliverabilityDashboardOptionRequest": { + "base": "

    Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email using Amazon SES API v2. You also gain the ability to perform predictive inbox placement tests.

    When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

    ", + "refs": { + } + }, + "PutDeliverabilityDashboardOptionResponse": { + "base": "

    A response that indicates whether the Deliverability dashboard is enabled.

    ", + "refs": { + } + }, + "PutEmailIdentityDkimAttributesRequest": { + "base": "

    A request to enable or disable DKIM signing of email that you send from an email identity.

    ", + "refs": { + } + }, + "PutEmailIdentityDkimAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutEmailIdentityFeedbackAttributesRequest": { + "base": "

    A request to set the attributes that control how bounce and complaint events are processed.

    ", + "refs": { + } + }, + "PutEmailIdentityFeedbackAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "PutEmailIdentityMailFromAttributesRequest": { + "base": "

    A request to configure the custom MAIL FROM domain for a verified identity.

    ", + "refs": { + } + }, + "PutEmailIdentityMailFromAttributesResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "RawMessage": { + "base": "

    The raw email message.

    ", + "refs": { + "EmailContent$Raw": "

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • If you include attachments, they must be in a file format that the Amazon SES API v2 supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.
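A sketch of building a message that satisfies these criteria with the Go SDK. It assumes the Data blob maps to []byte and that the SDK applies the required Base64 encoding when it serializes the request; the addresses are hypothetical, and the client setup is the same as in the first example.

```go
// A minimal RFC 5322 message: headers, one blank line, then the body.
raw := strings.Join([]string{
	"From: sender@example.com",
	"To: recipient@example.com",
	"Subject: Raw message test",
	"MIME-Version: 1.0",
	"Content-Type: text/plain; charset=UTF-8",
	"", // the blank line that separates the header from the body
	"Hello from a raw SES API v2 message.",
}, "\r\n")

req := client.SendEmailRequest(&sesv2.SendEmailInput{
	Destination: &sesv2.Destination{ToAddresses: []string{"recipient@example.com"}},
	Content: &sesv2.EmailContent{
		Raw: &sesv2.RawMessage{Data: []byte(raw)},
	},
})
if _, err := req.Send(context.TODO()); err != nil {
	panic(err)
}
```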

    " + } + }, + "RawMessageData": { + "base": "

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be in a file format that the Amazon SES API v2 supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

    ", + "refs": { + "RawMessage$Data": "

    The raw email message. The message has to meet the following criteria:

    • The message has to contain a header and a body, separated by one blank line.

    • All of the required header fields must be present in the message.

    • Each part of a multipart MIME message must be formatted properly.

    • Attachments must be in a file format that the Amazon SES API v2 supports.

    • The entire message must be Base64 encoded.

    • If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients' email clients render the message properly.

    • The length of any single line of text in the message can't exceed 1,000 characters. This restriction is defined in RFC 5321.

    " + } + }, + "RblName": { + "base": "

    The name of a blacklist that an IP address was found on.

    ", + "refs": { + "BlacklistEntry$RblName": "

    The name of the blacklist that the IP address appears on.

    " + } + }, + "ReportId": { + "base": "

    A unique string that identifies a Deliverability dashboard report.

    ", + "refs": { + "CreateDeliverabilityTestReportResponse$ReportId": "

    A unique string that identifies the predictive inbox placement test.

    ", + "DeliverabilityTestReport$ReportId": "

    A unique string that identifies the predictive inbox placement test.

    ", + "GetDeliverabilityTestReportRequest$ReportId": "

    A unique string that identifies the predictive inbox placement test.

    " + } + }, + "ReportName": { + "base": "

    A name that helps you identify a report generated by the Deliverability dashboard.

    ", + "refs": { + "CreateDeliverabilityTestReportRequest$ReportName": "

    A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.

    ", + "DeliverabilityTestReport$ReportName": "

    A name that helps you identify a predictive inbox placement test report.

    " + } + }, + "ReputationOptions": { + "base": "

    Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current AWS Region.

    ", + "refs": { + "CreateConfigurationSetRequest$ReputationOptions": "

    An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.

    ", + "GetConfigurationSetResponse$ReputationOptions": "

    An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set.

    " + } + }, + "SendEmailRequest": { + "base": "

    A request to send an email message.

    ", + "refs": { + } + }, + "SendEmailResponse": { + "base": "

    A unique message ID that you receive when an email is accepted for sending.

    ", + "refs": { + } + }, + "SendQuota": { + "base": "

    An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

    ", + "refs": { + "GetAccountResponse$SendQuota": "

    An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

    " + } + }, + "SendingOptions": { + "base": "

    Used to enable or disable email sending for messages that use this configuration set in the current AWS Region.

    ", + "refs": { + "CreateConfigurationSetRequest$SendingOptions": "

    An object that defines whether or not Amazon SES can send email that you send using the configuration set.

    ", + "GetConfigurationSetResponse$SendingOptions": "

    An object that defines whether or not Amazon SES can send email that you send using the configuration set.

    " + } + }, + "SendingPausedException": { + "base": "

    The message can't be sent because the account's ability to send email is currently paused.

    ", + "refs": { + } + }, + "SendingPoolName": { + "base": "

    The name of the dedicated IP pool that you want to associate with the configuration set.

    ", + "refs": { + "PutConfigurationSetDeliveryOptionsRequest$SendingPoolName": "

    The name of the dedicated IP pool that you want to associate with the configuration set.

    " + } + }, + "SentLast24Hours": { + "base": null, + "refs": { + "SendQuota$SentLast24Hours": "

    The number of emails sent from your Amazon SES account in the current AWS Region over the past 24 hours.

    " + } + }, + "SnsDestination": { + "base": "

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    ", + "refs": { + "EventDestination$SnsDestination": "

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    ", + "EventDestinationDefinition$SnsDestination": "

    An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.

    " + } + }, + "Subject": { + "base": null, + "refs": { + "DomainDeliverabilityCampaign$Subject": "

    The subject line, or title, of the email message.

    " + } + }, + "Tag": { + "base": "

    An object that defines the tags that are associated with a resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

    Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:

    • Tag keys and values are case sensitive.

    • For each associated resource, each tag key must be unique and it can have only one value.

    • The aws: prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.

    • You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.

    ", + "refs": { + "TagList$member": null + } + }, + "TagKey": { + "base": null, + "refs": { + "Tag$Key": "

    One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.

    ", + "TagKeyList$member": null + } + }, + "TagKeyList": { + "base": null, + "refs": { + "UntagResourceRequest$TagKeys": "

    The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.

    To remove more than one tag from the resource, append the TagKeys parameter and argument for each additional tag to remove, separated by an ampersand. For example: /v2/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2

    " + } + }, + "TagList": { + "base": null, + "refs": { + "CreateConfigurationSetRequest$Tags": "

    An array of objects that define the tags (keys and values) that you want to associate with the configuration set.

    ", + "CreateDedicatedIpPoolRequest$Tags": "

    An object that defines the tags (keys and values) that you want to associate with the pool.

    ", + "CreateDeliverabilityTestReportRequest$Tags": "

    An array of objects that define the tags (keys and values) that you want to associate with the predictive inbox placement test.

    ", + "CreateEmailIdentityRequest$Tags": "

    An array of objects that define the tags (keys and values) that you want to associate with the email identity.

    ", + "GetConfigurationSetResponse$Tags": "

    An array of objects that define the tags (keys and values) that are associated with the configuration set.

    ", + "GetDeliverabilityTestReportResponse$Tags": "

    An array of objects that define the tags (keys and values) that are associated with the predictive inbox placement test.

    ", + "GetEmailIdentityResponse$Tags": "

    An array of objects that define the tags (keys and values) that are associated with the email identity.

    ", + "ListTagsForResourceResponse$Tags": "

    An array that lists all the tags that are associated with the resource. Each tag consists of a required tag key (Key) and an associated tag value (Value).

    ", + "TagResourceRequest$Tags": "

    A list of the tags that you want to add to the resource. A tag consists of a required tag key (Key) and an associated tag value (Value). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
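A sketch of adding a tag within these limits; the configuration-set ARN is hypothetical, and the client setup is the same as in the first example.

```go
// Tag an existing configuration set; Key is at most 128 chars, Value at most 256.
req := client.TagResourceRequest(&sesv2.TagResourceInput{
	ResourceArn: aws.String("arn:aws:ses:us-east-1:111122223333:configuration-set/my-set"), // hypothetical ARN
	Tags: []sesv2.Tag{
		{Key: aws.String("environment"), Value: aws.String("production")},
	},
})
if _, err := req.Send(context.TODO()); err != nil {
	panic(err)
}
```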

    " + } + }, + "TagResourceRequest": { + "base": null, + "refs": { + } + }, + "TagResourceResponse": { + "base": null, + "refs": { + } + }, + "TagValue": { + "base": null, + "refs": { + "Tag$Value": "

    The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don't want a resource to have a specific tag value, don't specify a value for this parameter. If you don't specify a value, Amazon SES sets the value to an empty string.

    " + } + }, + "Template": { + "base": "

    An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to define, save, and reuse in email messages that you send.

    ", + "refs": { + "EmailContent$Template": "

    The template to use for the email message.

    " + } + }, + "TemplateArn": { + "base": null, + "refs": { + "Template$TemplateArn": "

    The Amazon Resource Name (ARN) of the template.

    " + } + }, + "TemplateData": { + "base": null, + "refs": { + "Template$TemplateData": "

    An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.

    " + } + }, + "Timestamp": { + "base": null, + "refs": { + "BlacklistEntry$ListingTime": "

    The time when the blacklisting event occurred, shown in Unix time format.

    ", + "DailyVolume$StartDate": "

    The date that the DailyVolume metrics apply to, in Unix time.

    ", + "DeliverabilityTestReport$CreateDate": "

    The date and time when the predictive inbox placement test was created, in Unix time format.

    ", + "DomainDeliverabilityCampaign$FirstSeenDateTime": "

    The first time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.

    ", + "DomainDeliverabilityCampaign$LastSeenDateTime": "

    The last time, in Unix time format, when the email message was delivered to any recipient's inbox. This value can help you determine how long it took for a campaign to deliver an email message.

    ", + "DomainDeliverabilityTrackingOption$SubscriptionStartDate": "

    The date, in Unix time format, when you enabled the Deliverability dashboard for the domain.

    ", + "GetDeliverabilityDashboardOptionsResponse$SubscriptionExpiryDate": "

    The date, in Unix time format, when your current subscription to the Deliverability dashboard is scheduled to expire, if your subscription is scheduled to expire at the end of the current calendar month. This value is null if you have an active subscription that isn’t due to expire at the end of the month.

    ", + "GetDomainStatisticsReportRequest$StartDate": "

    The first day (in Unix time) that you want to obtain domain deliverability metrics for.

    ", + "GetDomainStatisticsReportRequest$EndDate": "

    The last day (in Unix time) that you want to obtain domain deliverability metrics for. The EndDate that you specify has to be less than or equal to 30 days after the StartDate.

    ", + "ListDomainDeliverabilityCampaignsRequest$StartDate": "

    The first day, in Unix time format, that you want to obtain deliverability data for.

    ", + "ListDomainDeliverabilityCampaignsRequest$EndDate": "

    The last day, in Unix time format, that you want to obtain deliverability data for. This value has to be less than or equal to 30 days after the value of the StartDate parameter.
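The Unix-time window rules translate directly to time.Time values in Go. A sketch using GetDomainStatisticsReport, whose StartDate/EndDate follow the same 30-day constraint; the Domain field name and the domain itself are assumptions, and the client setup is the same as in the first example.

```go
// Request a two-week window of domain deliverability metrics.
end := time.Now()
start := end.AddDate(0, 0, -14) // EndDate must be no more than 30 days after StartDate

req := client.GetDomainStatisticsReportRequest(&sesv2.GetDomainStatisticsReportInput{
	Domain:    aws.String("example.com"), // hypothetical sending domain
	StartDate: &start,
	EndDate:   &end,
})
resp, err := req.Send(context.TODO())
if err != nil {
	panic(err)
}
_ = resp.OverallVolume // summary of all data collected from StartDate to EndDate
```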

    " + } + }, + "TlsPolicy": { + "base": "

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.

    ", + "refs": { + "DeliveryOptions$TlsPolicy": "

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.

    ", + "PutConfigurationSetDeliveryOptionsRequest$TlsPolicy": "

    Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). If the value is Require, messages are only delivered if a TLS connection can be established. If the value is Optional, messages can be delivered in plain text if a TLS connection can't be established.
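A sketch of requiring TLS for a configuration set while routing through a dedicated pool. The ConfigurationSetName field, both names, and the TlsPolicyRequire constant (which follows the SDK's usual enum naming) are assumptions; the client setup is the same as in the first example.

```go
// Deliver mail for this configuration set only over TLS, via a dedicated pool.
req := client.PutConfigurationSetDeliveryOptionsRequest(&sesv2.PutConfigurationSetDeliveryOptionsInput{
	ConfigurationSetName: aws.String("my-config-set"),     // hypothetical configuration set
	SendingPoolName:      aws.String("my-dedicated-pool"), // hypothetical dedicated IP pool
	TlsPolicy:            sesv2.TlsPolicyRequire,          // or sesv2.TlsPolicyOptional
})
if _, err := req.Send(context.TODO()); err != nil {
	panic(err)
}
```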

    " + } + }, + "TooManyRequestsException": { + "base": "

    Too many requests have been made to the operation.

    ", + "refs": { + } + }, + "TrackingOptions": { + "base": "

    An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.

    These images and links include references to a domain operated by AWS. You can optionally configure Amazon SES to use a domain that you operate for these images and links.

    ", + "refs": { + "CreateConfigurationSetRequest$TrackingOptions": "

    An object that defines the open and click tracking options for emails that you send using the configuration set.

    ", + "GetConfigurationSetResponse$TrackingOptions": "

    An object that defines the open and click tracking options for emails that you send using the configuration set.

    " + } + }, + "UntagResourceRequest": { + "base": null, + "refs": { + } + }, + "UntagResourceResponse": { + "base": null, + "refs": { + } + }, + "UpdateConfigurationSetEventDestinationRequest": { + "base": "

    A request to change the settings for an event destination for a configuration set.

    ", + "refs": { + } + }, + "UpdateConfigurationSetEventDestinationResponse": { + "base": "

    An HTTP 200 response if the request succeeds, or an error message if the request fails.

    ", + "refs": { + } + }, + "Volume": { + "base": "

    An object that contains information about inbox placement volume.

    ", + "refs": { + "DomainDeliverabilityCampaign$InboxCount": "

    The number of email messages that were delivered to recipients’ inboxes.

    ", + "DomainDeliverabilityCampaign$SpamCount": "

    The number of email messages that were delivered to recipients' spam or junk mail folders.

    ", + "DomainDeliverabilityCampaign$ProjectedVolume": "

    The projected number of recipients that the email message was sent to.

    ", + "DomainIspPlacement$InboxRawCount": "

    The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.

    ", + "DomainIspPlacement$SpamRawCount": "

    The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.

    ", + "VolumeStatistics$InboxRawCount": "

    The total number of emails that arrived in recipients' inboxes.

    ", + "VolumeStatistics$SpamRawCount": "

    The total number of emails that arrived in recipients' spam or junk mail folders.

    ", + "VolumeStatistics$ProjectedInbox": "

    An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.

    ", + "VolumeStatistics$ProjectedSpam": "

    An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.

    " + } + }, + "VolumeStatistics": { + "base": "

    An object that contains information about the amount of email that was delivered to recipients.

    ", + "refs": { + "DailyVolume$VolumeStatistics": "

    An object that contains inbox placement metrics for a specific day in the analysis period.

    ", + "OverallVolume$VolumeStatistics": "

    An object that contains information about the numbers of messages that arrived in recipients' inboxes and junk mail folders.

    " + } + }, + "WarmupStatus": { + "base": "

    The warm-up status of a dedicated IP address.

    ", + "refs": { + "DedicatedIp$WarmupStatus": "

    The warm-up status of a dedicated IP address. The status can have one of the following values:

    • IN_PROGRESS – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.

    • DONE – The dedicated IP warm-up process is complete, and the IP address is ready to use.

    " + } + } + } +} diff --git a/models/apis/sesv2/2019-09-27/examples-1.json b/models/apis/sesv2/2019-09-27/examples-1.json new file mode 100644 index 00000000000..0ea7e3b0bbe --- /dev/null +++ b/models/apis/sesv2/2019-09-27/examples-1.json @@ -0,0 +1,5 @@ +{ + "version": "1.0", + "examples": { + } +} diff --git a/models/apis/sesv2/2019-09-27/paginators-1.json b/models/apis/sesv2/2019-09-27/paginators-1.json new file mode 100644 index 00000000000..8a17fd2e79a --- /dev/null +++ b/models/apis/sesv2/2019-09-27/paginators-1.json @@ -0,0 +1,34 @@ +{ + "pagination": { + "GetDedicatedIps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListConfigurationSets": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDedicatedIpPools": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDeliverabilityTestReports": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListDomainDeliverabilityCampaigns": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + }, + "ListEmailIdentities": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "PageSize" + } + } +} diff --git a/models/apis/ssm/2014-11-06/api-2.json b/models/apis/ssm/2014-11-06/api-2.json index d097da9c18e..ab18c2e3217 100644 --- a/models/apis/ssm/2014-11-06/api-2.json +++ b/models/apis/ssm/2014-11-06/api-2.json @@ -310,7 +310,8 @@ "output":{"shape":"DeleteResourceDataSyncResult"}, "errors":[ {"shape":"InternalServerError"}, - {"shape":"ResourceDataSyncNotFoundException"} + {"shape":"ResourceDataSyncNotFoundException"}, + {"shape":"ResourceDataSyncInvalidConfigurationException"} ] }, "DeregisterManagedInstance":{ @@ -1004,6 +1005,7 @@ "output":{"shape":"GetOpsSummaryResult"}, "errors":[ {"shape":"InternalServerError"}, + {"shape":"ResourceDataSyncNotFoundException"}, {"shape":"InvalidFilter"}, {"shape":"InvalidNextToken"}, {"shape":"InvalidTypeNameException"}, @@ -1281,6 +1283,7 @@ "input":{"shape":"ListResourceDataSyncRequest"}, "output":{"shape":"ListResourceDataSyncResult"}, "errors":[ + {"shape":"ResourceDataSyncInvalidConfigurationException"}, {"shape":"InternalServerError"}, {"shape":"InvalidNextToken"} ] @@ -2198,6 +2201,10 @@ "type":"string", "enum":["Sha256"] }, + "AttachmentIdentifier":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_\\-.]{3,128}$" + }, "AttachmentInformation":{ "type":"structure", "members":{ @@ -2217,17 +2224,21 @@ "type":"structure", "members":{ "Key":{"shape":"AttachmentsSourceKey"}, - "Values":{"shape":"AttachmentsSourceValues"} + "Values":{"shape":"AttachmentsSourceValues"}, + "Name":{"shape":"AttachmentIdentifier"} } }, "AttachmentsSourceKey":{ "type":"string", - "enum":["SourceUrl"] + "enum":[ + "SourceUrl", + "S3FileUrl" + ] }, "AttachmentsSourceList":{ "type":"list", "member":{"shape":"AttachmentsSource"}, - "max":1, + "max":20, "min":0 }, "AttachmentsSourceValue":{ @@ -3025,7 +3036,9 @@ "RelatedOpsItems":{"shape":"RelatedOpsItems"}, "Source":{"shape":"OpsItemSource"}, "Title":{"shape":"OpsItemTitle"}, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "Category":{"shape":"OpsItemCategory"}, + "Severity":{"shape":"OpsItemSeverity"} } }, "CreateOpsItemResponse":{ @@ -3067,13 +3080,12 @@ }, "CreateResourceDataSyncRequest":{ "type":"structure", - "required":[ - "SyncName", - "S3Destination" - ], + "required":["SyncName"], "members":{ 
"SyncName":{"shape":"ResourceDataSyncName"}, - "S3Destination":{"shape":"ResourceDataSyncS3Destination"} + "S3Destination":{"shape":"ResourceDataSyncS3Destination"}, + "SyncType":{"shape":"ResourceDataSyncType"}, + "SyncSource":{"shape":"ResourceDataSyncSource"} } }, "CreateResourceDataSyncResult":{ @@ -3213,7 +3225,8 @@ "type":"structure", "required":["SyncName"], "members":{ - "SyncName":{"shape":"ResourceDataSyncName"} + "SyncName":{"shape":"ResourceDataSyncName"}, + "SyncType":{"shape":"ResourceDataSyncType"} } }, "DeleteResourceDataSyncResult":{ @@ -4598,10 +4611,11 @@ }, "GetOpsSummaryRequest":{ "type":"structure", - "required":["Aggregators"], "members":{ + "SyncName":{"shape":"ResourceDataSyncName"}, "Filters":{"shape":"OpsFilterList"}, "Aggregators":{"shape":"OpsAggregatorList"}, + "ResultAttributes":{"shape":"OpsResultAttributeList"}, "NextToken":{"shape":"NextToken"}, "MaxResults":{ "shape":"MaxResults", @@ -5944,6 +5958,7 @@ "ListResourceDataSyncRequest":{ "type":"structure", "members":{ + "SyncType":{"shape":"ResourceDataSyncType"}, "NextToken":{"shape":"NextToken"}, "MaxResults":{ "shape":"MaxResults", @@ -6504,11 +6519,11 @@ "type":"string", "max":20, "min":1, - "pattern":"^(range|count)" + "pattern":"^(range|count|sum)" }, "OpsAggregatorValue":{ "type":"string", - "max":512, + "max":2048, "min":0 }, "OpsAggregatorValueKey":{ @@ -6541,9 +6556,14 @@ "OpsEntityItem":{ "type":"structure", "members":{ + "CaptureTime":{"shape":"OpsEntityItemCaptureTime"}, "Content":{"shape":"OpsEntityItemEntryList"} } }, + "OpsEntityItemCaptureTime":{ + "type":"string", + "pattern":"^(20)[0-9][0-9]-(0[1-9]|1[012])-([12][0-9]|3[01]|0[1-9])(T)(2[0-3]|[0-1][0-9])(:[0-5][0-9])(:[0-5][0-9])(Z)$" + }, "OpsEntityItemEntry":{ "type":"map", "key":{"shape":"AttributeName"}, @@ -6624,7 +6644,9 @@ "Version":{"shape":"String"}, "Title":{"shape":"OpsItemTitle"}, "Source":{"shape":"OpsItemSource"}, - "OperationalData":{"shape":"OpsItemOperationalData"} + "OperationalData":{"shape":"OpsItemOperationalData"}, + "Category":{"shape":"OpsItemCategory"}, + "Severity":{"shape":"OpsItemSeverity"} } }, "OpsItemAlreadyExistsException":{ @@ -6635,6 +6657,11 @@ }, "exception":true }, + "OpsItemCategory":{ + "type":"string", + "max":64, + "min":1 + }, "OpsItemDataKey":{ "type":"string", "max":128, @@ -6688,7 +6715,9 @@ "OperationalDataKey", "OperationalDataValue", "ResourceId", - "AutomationId" + "AutomationId", + "Category", + "Severity" ] }, "OpsItemFilterOperator":{ @@ -6771,6 +6800,11 @@ "max":5, "min":1 }, + "OpsItemSeverity":{ + "type":"string", + "max":64, + "min":1 + }, "OpsItemSource":{ "type":"string", "max":64, @@ -6800,7 +6834,9 @@ "Status":{"shape":"OpsItemStatus"}, "OpsItemId":{"shape":"OpsItemId"}, "Title":{"shape":"OpsItemTitle"}, - "OperationalData":{"shape":"OpsItemOperationalData"} + "OperationalData":{"shape":"OpsItemOperationalData"}, + "Category":{"shape":"OpsItemCategory"}, + "Severity":{"shape":"OpsItemSeverity"} } }, "OpsItemTitle":{ @@ -6808,6 +6844,19 @@ "max":1024, "min":1 }, + "OpsResultAttribute":{ + "type":"structure", + "required":["TypeName"], + "members":{ + "TypeName":{"shape":"OpsDataTypeName"} + } + }, + "OpsResultAttributeList":{ + "type":"list", + "member":{"shape":"OpsResultAttribute"}, + "max":1, + "min":1 + }, "OutputSource":{ "type":"structure", "members":{ @@ -7737,6 +7786,14 @@ }, "exception":true }, + "ResourceDataSyncAwsOrganizationsSource":{ + "type":"structure", + "required":["OrganizationSourceType"], + "members":{ + 
"OrganizationSourceType":{"shape":"ResourceDataSyncOrganizationSourceType"}, + "OrganizationalUnits":{"shape":"ResourceDataSyncOrganizationalUnitList"} + } + }, "ResourceDataSyncCountExceededException":{ "type":"structure", "members":{ @@ -7745,6 +7802,7 @@ "exception":true }, "ResourceDataSyncCreatedTime":{"type":"timestamp"}, + "ResourceDataSyncIncludeFutureRegions":{"type":"boolean"}, "ResourceDataSyncInvalidConfigurationException":{ "type":"structure", "members":{ @@ -7756,9 +7814,12 @@ "type":"structure", "members":{ "SyncName":{"shape":"ResourceDataSyncName"}, + "SyncType":{"shape":"ResourceDataSyncType"}, + "SyncSource":{"shape":"ResourceDataSyncSourceWithState"}, "S3Destination":{"shape":"ResourceDataSyncS3Destination"}, "LastSyncTime":{"shape":"LastResourceDataSyncTime"}, "LastSuccessfulSyncTime":{"shape":"LastSuccessfulResourceDataSyncTime"}, + "SyncLastModifiedTime":{"shape":"ResourceDataSyncLastModifiedTime"}, "LastStatus":{"shape":"LastResourceDataSyncStatus"}, "SyncCreatedTime":{"shape":"ResourceDataSyncCreatedTime"}, "LastSyncStatusMessage":{"shape":"LastResourceDataSyncMessage"} @@ -7768,6 +7829,7 @@ "type":"list", "member":{"shape":"ResourceDataSyncItem"} }, + "ResourceDataSyncLastModifiedTime":{"type":"timestamp"}, "ResourceDataSyncName":{ "type":"string", "max":64, @@ -7776,10 +7838,35 @@ "ResourceDataSyncNotFoundException":{ "type":"structure", "members":{ - "SyncName":{"shape":"ResourceDataSyncName"} + "SyncName":{"shape":"ResourceDataSyncName"}, + "SyncType":{"shape":"ResourceDataSyncType"}, + "Message":{"shape":"String"} }, "exception":true }, + "ResourceDataSyncOrganizationSourceType":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceDataSyncOrganizationalUnit":{ + "type":"structure", + "members":{ + "OrganizationalUnitId":{"shape":"ResourceDataSyncOrganizationalUnitId"} + } + }, + "ResourceDataSyncOrganizationalUnitId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$" + }, + "ResourceDataSyncOrganizationalUnitList":{ + "type":"list", + "member":{"shape":"ResourceDataSyncOrganizationalUnit"}, + "max":1000, + "min":1 + }, "ResourceDataSyncS3BucketName":{ "type":"string", "max":2048, @@ -7814,6 +7901,53 @@ "max":64, "min":1 }, + "ResourceDataSyncSource":{ + "type":"structure", + "required":[ + "SourceType", + "SourceRegions" + ], + "members":{ + "SourceType":{"shape":"ResourceDataSyncSourceType"}, + "AwsOrganizationsSource":{"shape":"ResourceDataSyncAwsOrganizationsSource"}, + "SourceRegions":{"shape":"ResourceDataSyncSourceRegionList"}, + "IncludeFutureRegions":{"shape":"ResourceDataSyncIncludeFutureRegions"} + } + }, + "ResourceDataSyncSourceRegion":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceDataSyncSourceRegionList":{ + "type":"list", + "member":{"shape":"ResourceDataSyncSourceRegion"} + }, + "ResourceDataSyncSourceType":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceDataSyncSourceWithState":{ + "type":"structure", + "members":{ + "SourceType":{"shape":"ResourceDataSyncSourceType"}, + "AwsOrganizationsSource":{"shape":"ResourceDataSyncAwsOrganizationsSource"}, + "SourceRegions":{"shape":"ResourceDataSyncSourceRegionList"}, + "IncludeFutureRegions":{"shape":"ResourceDataSyncIncludeFutureRegions"}, + "State":{"shape":"ResourceDataSyncState"} + } + }, + "ResourceDataSyncState":{ + "type":"string", + "max":64, + "min":1 + }, + "ResourceDataSyncType":{ + "type":"string", + "max":64, + "min":1 + }, "ResourceId":{"type":"string"}, "ResourceInUseException":{ "type":"structure", @@ -8828,7 
+8962,9 @@ "RelatedOpsItems":{"shape":"RelatedOpsItems"}, "Status":{"shape":"OpsItemStatus"}, "OpsItemId":{"shape":"OpsItemId"}, - "Title":{"shape":"OpsItemTitle"} + "Title":{"shape":"OpsItemTitle"}, + "Category":{"shape":"OpsItemCategory"}, + "Severity":{"shape":"OpsItemSeverity"} } }, "UpdateOpsItemResponse":{ diff --git a/models/apis/ssm/2014-11-06/docs-2.json b/models/apis/ssm/2014-11-06/docs-2.json index 7e032b60493..9b3faa9c254 100644 --- a/models/apis/ssm/2014-11-06/docs-2.json +++ b/models/apis/ssm/2014-11-06/docs-2.json @@ -12,7 +12,7 @@ "CreateMaintenanceWindow": "

    Creates a new maintenance window.

    The value you specify for Duration determines the specific end time for the maintenance window based on the time it begins. No maintenance window tasks are permitted to start after the resulting end time minus the number of hours you specify for Cutoff. For example, if the maintenance window starts at 3 PM, the duration is three hours, and the value you specify for Cutoff is one hour, no maintenance window tasks can start after 5 PM.

    ", "CreateOpsItem": "

    Creates a new OpsItem. You must have permission in AWS Identity and Access Management (IAM) to create a new OpsItem. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

    Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

    ", "CreatePatchBaseline": "

    Creates a patch baseline.

    For information about valid key and value pairs in PatchFilters for each supported operating system type, see PatchFilter.

    ", - "CreateResourceDataSync": "

    Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync.

    By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

    ", + "CreateResourceDataSync": "

    A resource data sync helps you view data from multiple sources in a single location. Systems Manager offers two types of resource data sync: SyncToDestination and SyncFromSource.

    You can configure Systems Manager Inventory to use the SyncToDestination type to synchronize Inventory data from multiple AWS Regions to a single Amazon S3 bucket. For more information, see Configuring Resource Data Sync for Inventory in the AWS Systems Manager User Guide.

    You can configure Systems Manager Explorer to use the SyncToDestination type to synchronize operational work items (OpsItems) and operational data (OpsData) from multiple AWS Regions to a single Amazon S3 bucket. You can also configure Explorer to use the SyncFromSource type. This type synchronizes OpsItems and OpsData from multiple AWS accounts and Regions by using AWS Organizations. For more information, see Setting Up Explorer to Display Data from Multiple Accounts and Regions in the AWS Systems Manager User Guide.

    A resource data sync is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data. To check the status of a sync, use the ListResourceDataSync operation.

    By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy.
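A hedged sketch of the new SyncFromSource type, using the shapes added in the api-2.json changes above. The sync name, organizational unit, and Regions are hypothetical; the SourceType and OrganizationSourceType string values follow the Systems Manager documentation for this feature and should be treated as assumptions; cfg is loaded as in the earlier SES sketch.

```go
ssmClient := ssm.New(cfg) // github.com/aws/aws-sdk-go-v2/service/ssm

req := ssmClient.CreateResourceDataSyncRequest(&ssm.CreateResourceDataSyncInput{
	SyncName: aws.String("org-wide-opsdata"), // hypothetical sync name
	SyncType: aws.String("SyncFromSource"),   // the other supported type is SyncToDestination
	SyncSource: &ssm.ResourceDataSyncSource{
		SourceType: aws.String("AwsOrganizations"),
		AwsOrganizationsSource: &ssm.ResourceDataSyncAwsOrganizationsSource{
			OrganizationSourceType: aws.String("OrganizationalUnits"),
			OrganizationalUnits: []ssm.ResourceDataSyncOrganizationalUnit{
				{OrganizationalUnitId: aws.String("ou-abcd-12345678")}, // hypothetical OU
			},
		},
		SourceRegions:        []string{"us-east-1", "us-west-2"},
		IncludeFutureRegions: aws.Bool(true),
	},
})
if _, err := req.Send(context.TODO()); err != nil {
	panic(err)
}
```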

    ", "DeleteActivation": "

    Deletes an activation. You are not required to delete an activation. If you delete an activation, you can no longer use it to register additional managed instances. Deleting an activation does not de-register managed instances. You must manually de-register managed instances.

    ", "DeleteAssociation": "

    Disassociates the specified Systems Manager document from the specified instance.

    When you disassociate a document from an instance, it does not change the configuration of the instance. To change the configuration state of an instance after you disassociate a document, you must create a new document with the desired configuration and associate it with the instance.

    ", "DeleteDocument": "

    Deletes the Systems Manager document and all instance associations to the document.

    Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

    ", @@ -21,7 +21,7 @@ "DeleteParameter": "

    Delete a parameter from the system.

    ", "DeleteParameters": "

    Delete a list of parameters.

    ", "DeletePatchBaseline": "

    Deletes a patch baseline.

    ", - "DeleteResourceDataSync": "

    Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to inventory data on managed instances are no longer synced with the target Amazon S3 bucket. Deleting a sync configuration does not delete data in the target Amazon S3 bucket.

    ", + "DeleteResourceDataSync": "

    Deletes a Resource Data Sync configuration. After the configuration is deleted, changes to data on managed instances are no longer synced to or from the target. Deleting a sync configuration does not delete data.

    ", "DeregisterManagedInstance": "

    Removes the server or virtual machine from the list of registered servers. You can reregister the instance at any time. If you don't plan to use Run Command on the server, we suggest uninstalling SSM Agent first.

    ", "DeregisterPatchBaselineForPatchGroup": "

    Removes a patch group from a patch baseline.

    ", "DeregisterTargetFromMaintenanceWindow": "

    Removes a target from a maintenance window.

    ", @@ -52,7 +52,7 @@ "DescribeMaintenanceWindows": "

    Retrieves the maintenance windows in an AWS account.

    ", "DescribeMaintenanceWindowsForTarget": "

    Retrieves information about the maintenance window targets or tasks that an instance is associated with.

    ", "DescribeOpsItems": "

    Query a set of OpsItems. You must have permission in AWS Identity and Access Management (IAM) to query a list of OpsItems. For more information, see Getting Started with OpsCenter in the AWS Systems Manager User Guide.

    Operations engineers and IT professionals use OpsCenter to view, investigate, and remediate operational issues impacting the performance and health of their AWS resources. For more information, see AWS Systems Manager OpsCenter in the AWS Systems Manager User Guide.

    ", - "DescribeParameters": "

    Get information about a parameter.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    ", + "DescribeParameters": "

    Get information about a parameter.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    ", "DescribePatchBaselines": "

    Lists the patch baselines in your AWS account.

    ", "DescribePatchGroupState": "

    Returns high-level aggregated patch compliance state for a patch group.

    ", "DescribePatchGroups": "

    Lists all patch groups that have been registered with patch baselines.

    ", @@ -76,7 +76,7 @@ "GetParameter": "

    Get information about a parameter by using the parameter name. Don't confuse this API action with the GetParameters API action.

    ", "GetParameterHistory": "

    Query a list of all parameters used by the AWS account.

    ", "GetParameters": "

    Get details of a parameter. Don't confuse this API action with the GetParameter API action.

    ", - "GetParametersByPath": "

    Retrieve parameters in a specific hierarchy. For more information, see Working with Systems Manager Parameters in the AWS Systems Manager User Guide.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

    This API action doesn't support filtering by tags.

    ", + "GetParametersByPath": "

    Retrieve information about one or more parameters in a specific hierarchy.

    Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.
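A hedged sketch of retrieving a hierarchy with GetParametersByPath; the /myapp/prod path, page size, and decryption flag are illustrative placeholders, and the same NextToken handling as shown for DescribeParameters applies here as well:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// Retrieve every parameter under the (hypothetical) /myapp/prod hierarchy.
	req := svc.GetParametersByPathRequest(&ssm.GetParametersByPathInput{
		Path:           aws.String("/myapp/prod"),
		Recursive:      aws.Bool(true),
		WithDecryption: aws.Bool(true),
		MaxResults:     aws.Int64(10),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Parameters {
		fmt.Println(*p.Name)
	}
	// As with DescribeParameters, resp.NextToken must be fed back into a
	// follow-up request until it is nil to drain the full result set.
}
```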

    ", "GetPatchBaseline": "

    Retrieves information about a patch baseline.

    ", "GetPatchBaselineForPatchGroup": "

    Retrieves the patch baseline that should be used for the specified patch group.

    ", "GetServiceSetting": "

    ServiceSetting is an account-level setting for an AWS service. This setting defines how a user interacts with or uses a service or a feature of a service. For example, if an AWS service charges money to the account based on feature or service usage, then the AWS service team might create a default setting of \"false\". This means the user can't use this feature unless they change the setting to \"true\" and intentionally opt in for a paid feature.

    Services map a SettingId object to a setting value. AWS services teams define the default value for a SettingId. You can't create a new SettingId, but you can overwrite the default value if you have the ssm:UpdateServiceSetting permission for the setting. Use the UpdateServiceSetting API action to change the default setting. Or use the ResetServiceSetting to change the value back to the original value defined by the AWS service team.

    Query the current service setting for the account.

    ", @@ -528,6 +528,12 @@ "AttachmentContent$HashType": "

    The hash algorithm used to calculate the hash value.

    " } }, + "AttachmentIdentifier": { + "base": null, + "refs": { + "AttachmentsSource$Name": "

    The name of the document attachment file.

    " + } + }, "AttachmentInformation": { "base": "

    An attribute of an attachment, such as the attachment name.

    ", "refs": { @@ -554,7 +560,7 @@ } }, "AttachmentsSource": { - "base": "

    A key and value pair that identifies the location of an attachment to a document.

    ", + "base": "

    Identifying information about a document attachment, including the file name and a key-value pair that identifies the location of an attachment to a document.

    ", "refs": { "AttachmentsSourceList$member": null } @@ -562,7 +568,7 @@ "AttachmentsSourceKey": { "base": null, "refs": { - "AttachmentsSource$Key": "

    The key of a key and value pair that identifies the location of an attachment to a document.

    " + "AttachmentsSource$Key": "

    The key of a key-value pair that identifies the location of an attachment to a document.

    " } }, "AttachmentsSourceList": { @@ -581,7 +587,7 @@ "AttachmentsSourceValues": { "base": null, "refs": { - "AttachmentsSource$Values": "

    The URL of the location of a document attachment, such as the URL of an Amazon S3 bucket.

    " + "AttachmentsSource$Values": "

    The value of a key-value pair that identifies the location of an attachment to a document. The value is formatted as a URL, such as the URL of an Amazon S3 bucket where the attachment is stored.

    " } }, "AttributeName": { @@ -907,7 +913,7 @@ "CommandFilterList": { "base": null, "refs": { - "ListCommandInvocationsRequest$Filters": "

    (Optional) One or more filters. Use a filter to return a more specific list of results.

    ", + "ListCommandInvocationsRequest$Filters": "

    (Optional) One or more filters. Use a filter to return a more specific list of results. Note that the DocumentName filter is not supported for ListCommandInvocations.

    ", "ListCommandsRequest$Filters": "

    (Optional) One or more filters. Use a filter to return a more specific list of results.

    " } }, @@ -2700,7 +2706,7 @@ "ListCommandsRequest$InstanceId": "

    (Optional) Lists commands issued against this instance ID.

    ", "ListInventoryEntriesRequest$InstanceId": "

    The instance ID for which you want inventory information.

    ", "ListInventoryEntriesResult$InstanceId": "

    The instance ID targeted by the request to query inventory information.

    ", - "PutInventoryRequest$InstanceId": "

    One or more instance IDs where you want to add or update inventory items.

    ", + "PutInventoryRequest$InstanceId": "

    An instance ID where you want to add or update inventory items.

    ", "UpdateAssociationStatusRequest$InstanceId": "

    The ID of the instance.

    " } }, @@ -4443,7 +4449,8 @@ "OpsDataTypeName": { "base": null, "refs": { - "OpsAggregator$TypeName": "

    The data type name to use for viewing counts of OpsItems.

    " + "OpsAggregator$TypeName": "

    The data type name to use for viewing counts of OpsItems.

    ", + "OpsResultAttribute$TypeName": "

    The name of the data type. Valid values: AWS:OpsItem, AWS:EC2InstanceInformation, AWS:OpsItemTrendline, or AWS:ComplianceSummary.
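A sketch of how the new ResultAttributes and SyncName members might be used together on GetOpsSummary; the sync name below is a placeholder, the TypeName is one of the values listed above, and the call follows the v0.x Request/Send pattern assumed throughout these examples:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// Ask GetOpsSummary for OpsItem records aggregated through a resource
	// data sync; "my-resource-data-sync" is a placeholder name.
	req := svc.GetOpsSummaryRequest(&ssm.GetOpsSummaryInput{
		SyncName: aws.String("my-resource-data-sync"),
		ResultAttributes: []ssm.OpsResultAttribute{
			{TypeName: aws.String("AWS:OpsItem")},
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("entities returned:", len(resp.Entities))
}
```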

    " } }, "OpsEntity": { @@ -4464,6 +4471,12 @@ "OpsEntityItemMap$value": null } }, + "OpsEntityItemCaptureTime": { + "base": null, + "refs": { + "OpsEntityItem$CaptureTime": "

    The time OpsItem data was captured.

    " + } + }, "OpsEntityItemEntry": { "base": null, "refs": { @@ -4542,6 +4555,15 @@ "refs": { } }, + "OpsItemCategory": { + "base": null, + "refs": { + "CreateOpsItemRequest$Category": "

    Specify a category to assign to an OpsItem.

    ", + "OpsItem$Category": "

    An OpsItem category. Category options include: Availability, Cost, Performance, Recovery, Security.

    ", + "OpsItemSummary$Category": "

    A list of OpsItems by category.

    ", + "UpdateOpsItemRequest$Category": "

    Specify a new category for an OpsItem.

    " + } + }, "OpsItemDataKey": { "base": null, "refs": { @@ -4685,6 +4707,15 @@ "UpdateOpsItemRequest$Priority": "

    The importance of this OpsItem in relation to other OpsItems in the system.

    " } }, + "OpsItemSeverity": { + "base": null, + "refs": { + "CreateOpsItemRequest$Severity": "

    Specify a severity to assign to an OpsItem.

    ", + "OpsItem$Severity": "

    The severity of the OpsItem. Severity options range from 1 to 4.

    ", + "OpsItemSummary$Severity": "

    A list of OpsItems by severity.

    ", + "UpdateOpsItemRequest$Severity": "

    Specify a new severity for an OpsItem.
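Taken together, the new Category and Severity members let CreateOpsItem classify an OpsItem at creation time. A minimal sketch under the same SDK assumptions as the earlier examples; the title, description, and source values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// Category must be one of the documented options (Availability, Cost,
	// Performance, Recovery, Security); Severity is "1" through "4".
	req := svc.CreateOpsItemRequest(&ssm.CreateOpsItemInput{
		Title:       aws.String("High p99 latency on checkout"), // placeholder
		Description: aws.String("Latency alarm fired for the checkout service."),
		Source:      aws.String("CloudWatch"),
		Category:    aws.String("Performance"),
		Severity:    aws.String("2"),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created OpsItem:", *resp.OpsItemId)
}
```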

    " + } + }, "OpsItemSource": { "base": null, "refs": { @@ -4722,6 +4753,18 @@ "UpdateOpsItemRequest$Title": "

    A short heading that describes the nature of the OpsItem and the impacted resource.

    " } }, + "OpsResultAttribute": { + "base": "

    The OpsItem data type to return.

    ", + "refs": { + "OpsResultAttributeList$member": null + } + }, + "OpsResultAttributeList": { + "base": null, + "refs": { + "GetOpsSummaryRequest$ResultAttributes": "

    The OpsItem data type to return.

    " + } + }, "OutputSource": { "base": "

    Information about the source where the association execution details are stored.

    ", "refs": { @@ -4921,7 +4964,7 @@ } }, "ParameterStringFilter": { - "base": "

    One or more filters. Use a filter to return a more specific list of results.

    The Name and Tier filter keys can't be used with the GetParametersByPath API action. Also, the Label filter key can't be used with the DescribeParameters API action.

    ", + "base": "

    One or more filters. Use a filter to return a more specific list of results.

    The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API actions. However, not all of the pattern values listed for Key can be used with both actions.

    For DescribeParameters, all of the listed patterns are valid, with the exception of Label.

    For GetParametersByPath, the following patterns listed for Key are not valid: Name, Path, and Tier.

    For examples of CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager Parameters in the AWS Systems Manager User Guide.
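A sketch of one filter construction that is valid for DescribeParameters under the rules above (Key Name with the BeginsWith option); the /myapp/ prefix is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// DescribeParameters accepts the Name key; BeginsWith narrows by prefix.
	req := svc.DescribeParametersRequest(&ssm.DescribeParametersInput{
		ParameterFilters: []ssm.ParameterStringFilter{
			{
				Key:    aws.String("Name"),
				Option: aws.String("BeginsWith"),
				Values: []string{"/myapp/"},
			},
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range resp.Parameters {
		fmt.Println(*p.Name)
	}
}
```

GetParametersByPath takes the same ParameterStringFilter shape, but per the rules above only keys other than Name, Path, and Tier (for example Type or Label) are accepted there.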

    ", "refs": { "ParameterStringFilterList$member": null } @@ -4936,7 +4979,7 @@ "base": null, "refs": { "DescribeParametersRequest$ParameterFilters": "

    Filters to limit the request results.

    ", - "GetParametersByPathRequest$ParameterFilters": "

    Filters to limit the request results.

    You can't filter using the parameter name.

    " + "GetParametersByPathRequest$ParameterFilters": "

    Filters to limit the request results.

    " } }, "ParameterStringFilterValue": { @@ -4954,7 +4997,7 @@ "ParameterStringQueryOption": { "base": null, "refs": { - "ParameterStringFilter$Option": "

    Valid options are Equals and BeginsWith. For Path filter, valid options are Recursive and OneLevel.

    " + "ParameterStringFilter$Option": "

    For all filters used with DescribeParameters, valid options include Equals and BeginsWith. The Name filter additionally supports the Contains option. (Exception: For filters using the key Path, valid options include Recursive and OneLevel.)

    For filters used with GetParametersByPath, valid options include Equals and BeginsWith. (Exception: For filters using the key Label, the only valid option is Equals.)

    " } }, "ParameterTier": { @@ -5026,7 +5069,7 @@ "ParametersFilterList": { "base": null, "refs": { - "DescribeParametersRequest$Filters": "

    One or more filters. Use a filter to return a more specific list of results.

    " + "DescribeParametersRequest$Filters": "

    This data type is deprecated. Instead, use ParameterFilters.

    " } }, "ParametersFilterValue": { @@ -5690,6 +5733,13 @@ "refs": { } }, + "ResourceDataSyncAwsOrganizationsSource": { + "base": "

    Information about the AwsOrganizationsSource resource data sync source. A sync source of this type can synchronize data from AWS Organizations or, if an AWS Organization is not present, from multiple AWS Regions.

    ", + "refs": { + "ResourceDataSyncSource$AwsOrganizationsSource": "

    The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource type.

    ", + "ResourceDataSyncSourceWithState$AwsOrganizationsSource": "

    The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource type.

    " + } + }, "ResourceDataSyncCountExceededException": { "base": "

    You have exceeded the allowed maximum sync configurations.

    ", "refs": { @@ -5701,6 +5751,13 @@ "ResourceDataSyncItem$SyncCreatedTime": "

    The date and time the configuration was created (UTC).

    " } }, + "ResourceDataSyncIncludeFutureRegions": { + "base": null, + "refs": { + "ResourceDataSyncSource$IncludeFutureRegions": "

    Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.

    ", + "ResourceDataSyncSourceWithState$IncludeFutureRegions": "

    Whether to automatically synchronize and aggregate data from new AWS Regions when those Regions come online.

    " + } + }, "ResourceDataSyncInvalidConfigurationException": { "base": "

    The specified sync configuration is invalid.

    ", "refs": { @@ -5718,11 +5775,18 @@ "ListResourceDataSyncResult$ResourceDataSyncItems": "

    A list of your current Resource Data Sync configurations and their statuses.

    " } }, + "ResourceDataSyncLastModifiedTime": { + "base": null, + "refs": { + "ResourceDataSyncItem$SyncLastModifiedTime": "

    The date and time the resource data sync was changed.

    " + } + }, "ResourceDataSyncName": { "base": null, "refs": { "CreateResourceDataSyncRequest$SyncName": "

    A name for the configuration.

    ", "DeleteResourceDataSyncRequest$SyncName": "

    The name of the configuration to delete.

    ", + "GetOpsSummaryRequest$SyncName": "

    Specify the name of a resource data sync to get.

    ", "ResourceDataSyncAlreadyExistsException$SyncName": null, "ResourceDataSyncItem$SyncName": "

    The name of the Resource Data Sync.

    ", "ResourceDataSyncNotFoundException$SyncName": null @@ -5733,6 +5797,30 @@ "refs": { } }, + "ResourceDataSyncOrganizationSourceType": { + "base": null, + "refs": { + "ResourceDataSyncAwsOrganizationsSource$OrganizationSourceType": "

    If an AWS Organization is present, this is either OrganizationalUnits or EntireOrganization. For OrganizationalUnits, the data is aggregated from a set of organization units. For EntireOrganization, the data is aggregated from the entire AWS Organization.

    " + } + }, + "ResourceDataSyncOrganizationalUnit": { + "base": "

    The AWS Organizations organizational unit data source for the sync.

    ", + "refs": { + "ResourceDataSyncOrganizationalUnitList$member": null + } + }, + "ResourceDataSyncOrganizationalUnitId": { + "base": null, + "refs": { + "ResourceDataSyncOrganizationalUnit$OrganizationalUnitId": "

    The AWS Organization unit ID data source for the sync.

    " + } + }, + "ResourceDataSyncOrganizationalUnitList": { + "base": null, + "refs": { + "ResourceDataSyncAwsOrganizationsSource$OrganizationalUnits": "

    The AWS Organizations organization units included in the sync.

    " + } + }, "ResourceDataSyncS3BucketName": { "base": null, "refs": { @@ -5764,6 +5852,54 @@ "ResourceDataSyncS3Destination$Region": "

    The AWS Region with the Amazon S3 bucket targeted by the Resource Data Sync.

    " } }, + "ResourceDataSyncSource": { + "base": "

    Information about the source of the data included in the resource data sync.

    ", + "refs": { + "CreateResourceDataSyncRequest$SyncSource": "

    Specify information about the data sources to synchronize.

    " + } + }, + "ResourceDataSyncSourceRegion": { + "base": null, + "refs": { + "ResourceDataSyncSourceRegionList$member": null + } + }, + "ResourceDataSyncSourceRegionList": { + "base": null, + "refs": { + "ResourceDataSyncSource$SourceRegions": "

    The SyncSource AWS Regions included in the resource data sync.

    ", + "ResourceDataSyncSourceWithState$SourceRegions": "

    The SyncSource AWS Regions included in the resource data sync.

    " + } + }, + "ResourceDataSyncSourceType": { + "base": null, + "refs": { + "ResourceDataSyncSource$SourceType": "

    The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in AWS Organizations) or singleAccountMultiRegions.

    ", + "ResourceDataSyncSourceWithState$SourceType": "

    The type of data source for the resource data sync. SourceType is either AwsOrganizations (if an organization is present in AWS Organizations) or singleAccountMultiRegions.

    " + } + }, + "ResourceDataSyncSourceWithState": { + "base": "

    The data type name for including resource data sync state. There are four sync states:

    OrganizationNotExists (Your organization doesn't exist)

    NoPermissions (The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.)

    InvalidOrganizationalUnit (You specified or selected an invalid unit in the resource data sync configuration.)

    TrustedAccessDisabled (You disabled Systems Manager access in the organization in AWS Organizations.)

    ", + "refs": { + "ResourceDataSyncItem$SyncSource": "

    Information about the source where the data was synchronized.

    " + } + }, + "ResourceDataSyncState": { + "base": null, + "refs": { + "ResourceDataSyncSourceWithState$State": "

    The data type name for including resource data sync state. There are four sync states:

    OrganizationNotExists: Your organization doesn't exist.

    NoPermissions: The system can't locate the service-linked role. This role is automatically created when a user creates a resource data sync in Explorer.

    InvalidOrganizationalUnit: You specified or selected an invalid unit in the resource data sync configuration.

    TrustedAccessDisabled: You disabled Systems Manager access in the organization in AWS Organizations.

    " + } + }, + "ResourceDataSyncType": { + "base": null, + "refs": { + "CreateResourceDataSyncRequest$SyncType": "

    Specify SyncToDestination to create a resource data sync that synchronizes data from multiple AWS Regions to an Amazon S3 bucket. Specify SyncFromSource to synchronize data from multiple AWS accounts and Regions, as listed in AWS Organizations.

    ", + "DeleteResourceDataSyncRequest$SyncType": "

    Specify the type of resource data sync to delete.

    ", + "ListResourceDataSyncRequest$SyncType": "

    View a list of resource data syncs according to the sync type. Specify SyncToDestination to view resource data syncs that synchronize data to an Amazon S3 bucket. Specify SyncFromSource to view resource data syncs from AWS Organizations or from multiple AWS Regions.

    ", + "ResourceDataSyncItem$SyncType": "

    The type of resource data sync. If SyncType is SyncToDestination, then the resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType is SyncFromSource, then the resource data sync synchronizes data from AWS Organizations or from multiple AWS Regions.
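A hedged sketch of creating the SyncFromSource variant described above with CreateResourceDataSync, using the source shapes added in this revision; the sync name, organizational unit ID, and Regions are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// SyncFromSource aggregates data from the accounts and Regions listed in
	// AWS Organizations; all identifiers below are placeholders.
	req := svc.CreateResourceDataSyncRequest(&ssm.CreateResourceDataSyncInput{
		SyncName: aws.String("org-wide-sync"),
		SyncType: aws.String("SyncFromSource"),
		SyncSource: &ssm.ResourceDataSyncSource{
			SourceType: aws.String("AwsOrganizations"),
			AwsOrganizationsSource: &ssm.ResourceDataSyncAwsOrganizationsSource{
				OrganizationSourceType: aws.String("OrganizationalUnits"),
				OrganizationalUnits: []ssm.ResourceDataSyncOrganizationalUnit{
					{OrganizationalUnitId: aws.String("ou-example-12345678")},
				},
			},
			SourceRegions:        []string{"us-east-1", "eu-west-1"},
			IncludeFutureRegions: aws.Bool(true),
		},
	})
	if _, err := req.Send(context.Background()); err != nil {
		log.Fatal(err)
	}
	fmt.Println("resource data sync created")
}
```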

    ", + "ResourceDataSyncNotFoundException$SyncType": null + } + }, "ResourceId": { "base": null, "refs": { @@ -6383,6 +6519,7 @@ "RelatedOpsItem$OpsItemId": "

    The ID of an OpsItem related to the current OpsItem.

    ", "ResourceDataSyncCountExceededException$Message": null, "ResourceDataSyncInvalidConfigurationException$Message": null, + "ResourceDataSyncNotFoundException$Message": null, "ResourceInUseException$Message": null, "ResourceLimitExceededException$Message": null, "ServiceSetting$LastModifiedUser": "

    The ARN of the last modified user. This field is populated only if the setting value was overwritten.

    ", @@ -6465,7 +6602,7 @@ } }, "Target": { - "base": "

    An array of search criteria that targets instances using a Key,Value combination that you specify.

    Supported formats include the following.

    • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

    • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

    • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

    • (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name

    • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

    For example:

    • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

    • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

    • Key=tag-key,Values=Name,Instance-Type,CostCenter

    • (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup

    • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

    For information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.

    ", + "base": "

    An array of search criteria that targets instances using a Key,Value combination that you specify.

    Supported formats include the following.

    • Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3

    • Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2

    • Key=tag-key,Values=my-tag-key-1,my-tag-key-2

    • (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name

    • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2

    For example:

    • Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE

    • Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3

    • Key=tag-key,Values=Name,Instance-Type,CostCenter

    • (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup

      This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

    • (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC

      This example demonstrates how to target only Amazon EC2 instances and VPCs in your maintenance window.

    • (State Manager association targets only) Key=InstanceIds,Values=*

      This example demonstrates how to target all managed instances in the AWS Region where the association was created.

    For information about how to send commands that target instances using Key,Value parameters, see Using Targets and Rate Controls to Send Commands to a Fleet in the AWS Systems Manager User Guide.
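A sketch of sending a command to instances selected by one of the tag formats listed above; the tag key/value and shell command are placeholders, and the Request/Send pattern follows the rest of this SDK:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := ssm.New(cfg)

	// Target every instance tagged CostCenter=CostCenter1 (placeholder values)
	// instead of listing individual instance IDs.
	req := svc.SendCommandRequest(&ssm.SendCommandInput{
		DocumentName: aws.String("AWS-RunShellScript"),
		Targets: []ssm.Target{
			{
				Key:    aws.String("tag:CostCenter"),
				Values: []string{"CostCenter1"},
			},
		},
		Parameters: map[string][]string{
			"commands": {"uptime"},
		},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("command ID:", *resp.Command.CommandId)
}
```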

    ", "refs": { "Targets$member": null } diff --git a/models/apis/storagegateway/2013-06-30/api-2.json b/models/apis/storagegateway/2013-06-30/api-2.json index 42d6e8e3a41..c23756a74de 100644 --- a/models/apis/storagegateway/2013-06-30/api-2.json +++ b/models/apis/storagegateway/2013-06-30/api-2.json @@ -339,6 +339,19 @@ {"shape":"InternalServerError"} ] }, + "DescribeAvailabilityMonitorTest":{ + "name":"DescribeAvailabilityMonitorTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeAvailabilityMonitorTestInput"}, + "output":{"shape":"DescribeAvailabilityMonitorTestOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "DescribeBandwidthRateLimit":{ "name":"DescribeBandwidthRateLimit", "http":{ @@ -820,6 +833,19 @@ {"shape":"InternalServerError"} ] }, + "StartAvailabilityMonitorTest":{ + "name":"StartAvailabilityMonitorTest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartAvailabilityMonitorTestInput"}, + "output":{"shape":"StartAvailabilityMonitorTestOutput"}, + "errors":[ + {"shape":"InvalidGatewayRequestException"}, + {"shape":"InternalServerError"} + ] + }, "StartGateway":{ "name":"StartGateway", "http":{ @@ -995,6 +1021,18 @@ "max":50, "min":1 }, + "ActiveDirectoryStatus":{ + "type":"string", + "enum":[ + "ACCESS_DENIED", + "DETACHED", + "JOINED", + "JOINING", + "NETWORK_ERROR", + "TIMEOUT", + "UNKNOWN_ERROR" + ] + }, "AddCacheInput":{ "type":"structure", "required":[ @@ -1107,6 +1145,14 @@ "max":15, "min":5 }, + "AvailabilityMonitorTestStatus":{ + "type":"string", + "enum":[ + "COMPLETE", + "FAILED", + "PENDING" + ] + }, "BandwidthDownloadRateLimit":{ "type":"long", "min":102400 @@ -1547,6 +1593,21 @@ "VolumeARN":{"shape":"VolumeARN"} } }, + "DescribeAvailabilityMonitorTestInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "DescribeAvailabilityMonitorTestOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"}, + "Status":{"shape":"AvailabilityMonitorTestStatus"}, + "StartTime":{"shape":"Time"} + } + }, "DescribeBandwidthRateLimitInput":{ "type":"structure", "required":["GatewayARN"], @@ -1630,7 +1691,8 @@ "Ec2InstanceRegion":{"shape":"Ec2InstanceRegion"}, "Tags":{"shape":"Tags"}, "VPCEndpoint":{"shape":"string"}, - "CloudWatchLogGroupARN":{"shape":"CloudWatchLogGroupARN"} + "CloudWatchLogGroupARN":{"shape":"CloudWatchLogGroupARN"}, + "HostEnvironment":{"shape":"HostEnvironment"} } }, "DescribeMaintenanceStartTimeInput":{ @@ -1689,6 +1751,7 @@ "members":{ "GatewayARN":{"shape":"GatewayARN"}, "DomainName":{"shape":"DomainName"}, + "ActiveDirectoryStatus":{"shape":"ActiveDirectoryStatus"}, "SMBGuestPasswordSet":{"shape":"Boolean"}, "SMBSecurityStrategy":{"shape":"SMBSecurityStrategy"} } @@ -1967,6 +2030,7 @@ "LocalStorageLimitExceeded", "LunAlreadyAllocated ", "LunInvalid", + "JoinDomainInProgress", "MaximumContentLengthExceeded", "MaximumTapeCartridgeCountExceeded", "MaximumVolumeCountExceeded", @@ -2129,6 +2193,15 @@ "type":"string", "pattern":"^(([a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9\\-]*[A-Za-z0-9])(:(\\d+))?$" }, + "HostEnvironment":{ + "type":"string", + "enum":[ + "VMWARE", + "HYPER-V", + "EC2", + "OTHER" + ] + }, "Hosts":{ "type":"list", "member":{"shape":"Host"} @@ -2186,6 +2259,7 @@ "DomainName":{"shape":"DomainName"}, "OrganizationalUnit":{"shape":"OrganizationalUnit"}, "DomainControllers":{"shape":"Hosts"}, + 
"TimeoutInSeconds":{"shape":"TimeoutInSeconds"}, "UserName":{"shape":"DomainUserName"}, "Password":{"shape":"DomainUserPassword"} } @@ -2193,7 +2267,8 @@ "JoinDomainOutput":{ "type":"structure", "members":{ - "GatewayARN":{"shape":"GatewayARN"} + "GatewayARN":{"shape":"GatewayARN"}, + "ActiveDirectoryStatus":{"shape":"ActiveDirectoryStatus"} } }, "KMSKey":{ @@ -2680,6 +2755,19 @@ "max":15, "min":5 }, + "StartAvailabilityMonitorTestInput":{ + "type":"structure", + "required":["GatewayARN"], + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, + "StartAvailabilityMonitorTestOutput":{ + "type":"structure", + "members":{ + "GatewayARN":{"shape":"GatewayARN"} + } + }, "StartGatewayInput":{ "type":"structure", "required":["GatewayARN"], @@ -2868,6 +2956,11 @@ "pattern":"^[-\\.;a-z0-9]+$" }, "Time":{"type":"timestamp"}, + "TimeoutInSeconds":{ + "type":"integer", + "max":3600, + "min":0 + }, "UpdateBandwidthRateLimitInput":{ "type":"structure", "required":["GatewayARN"], diff --git a/models/apis/storagegateway/2013-06-30/docs-2.json b/models/apis/storagegateway/2013-06-30/docs-2.json index 7e2e134c233..65487b8afd8 100644 --- a/models/apis/storagegateway/2013-06-30/docs-2.json +++ b/models/apis/storagegateway/2013-06-30/docs-2.json @@ -19,18 +19,19 @@ "CreateStorediSCSIVolume": "

    Creates a volume on a specified gateway. This operation is only supported in the stored volume gateway type.

    The size of the volume to create is inferred from the disk size. You can choose to preserve existing data on the disk, create volume from an existing snapshot, or create an empty volume. If you choose to create an empty gateway volume, then any existing data on the disk is erased.

    In the request you must specify the gateway and the disk information on which you are creating the volume. In response, the gateway creates the volume and returns volume information such as the volume Amazon Resource Name (ARN), its size, and the iSCSI target ARN that initiators can use to connect to the volume target.

    ", "CreateTapeWithBarcode": "

    Creates a virtual tape by using your own barcode. You write data to the virtual tape and then archive the tape. A barcode is unique and cannot be reused if it has already been used on a tape. This applies to barcodes used on deleted tapes. This operation is only supported in the tape gateway type.

    Cache storage must be allocated to the gateway before you can create a virtual tape. Use the AddCache operation to add cache storage to a gateway.

    ", "CreateTapes": "

    Creates one or more virtual tapes. You write data to the virtual tapes and then archive the tapes. This operation is only supported in the tape gateway type.

    Cache storage must be allocated to the gateway before you can create virtual tapes. Use the AddCache operation to add cache storage to a gateway.

    ", - "DeleteBandwidthRateLimit": "

    Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", - "DeleteChapCredentials": "

    Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair.

    ", + "DeleteBandwidthRateLimit": "

    Deletes the bandwidth rate limits of a gateway. You can delete either the upload and download bandwidth rate limit, or you can delete both. If you delete only one of the limits, the other limit remains unchanged. To specify which gateway to work with, use the Amazon Resource Name (ARN) of the gateway in your request. This operation is supported for the stored volume, cached volume and tape gateway types.

    ", + "DeleteChapCredentials": "

    Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target and initiator pair. This operation is supported in volume and tape gateway types.

    ", "DeleteFileShare": "

    Deletes a file share from a file gateway. This operation is only supported for file gateways.

    ", "DeleteGateway": "

    Deletes a gateway. To specify which gateway to delete, use the Amazon Resource Name (ARN) of the gateway in your request. The operation deletes the gateway; however, it does not delete the gateway virtual machine (VM) from your host computer.

    After you delete a gateway, you cannot reactivate it. Completed snapshots of the gateway volumes are not deleted upon deleting the gateway, however, pending snapshots will not complete. After you delete a gateway, your next step is to remove it from your environment.

    You no longer pay software charges after the gateway is deleted; however, your existing Amazon EBS snapshots persist and you will continue to be billed for these snapshots. You can choose to remove all remaining Amazon EBS snapshots by canceling your Amazon EC2 subscription.  If you prefer not to cancel your Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 console. For more information, see the AWS Storage Gateway Detail Page.

    ", "DeleteSnapshotSchedule": "

    Deletes a snapshot of a volume.

    You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. This API action enables you to delete a snapshot schedule for a volume. For more information, see Working with Snapshots. In the DeleteSnapshotSchedule request, you identify the volume by providing its Amazon Resource Name (ARN). This operation is only supported in stored and cached volume gateway types.

    To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see the Amazon Elastic Compute Cloud API Reference.

    ", "DeleteTape": "

    Deletes the specified virtual tape. This operation is only supported in the tape gateway type.

    ", "DeleteTapeArchive": "

    Deletes the specified virtual tape from the virtual tape shelf (VTS). This operation is only supported in the tape gateway type.

    ", "DeleteVolume": "

    Deletes the specified storage volume that you previously created using the CreateCachediSCSIVolume or CreateStorediSCSIVolume API. This operation is only supported in the cached volume and stored volume types. For stored volume gateways, the local disk that was configured as the storage volume is not deleted. You can reuse the local disk to create another storage volume.

    Before you delete a volume, make sure there are no iSCSI connections to the volume you are deleting. You should also make sure there is no snapshot in progress. You can use the Amazon Elastic Compute Cloud (Amazon EC2) API to query snapshots on the volume you are deleting and check the snapshot status. For more information, go to DescribeSnapshots in the Amazon Elastic Compute Cloud API Reference.

    In the request, you must provide the Amazon Resource Name (ARN) of the storage volume you want to delete.

    ", - "DescribeBandwidthRateLimit": "

    Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect.

    This operation only returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "DescribeAvailabilityMonitorTest": "

    Returns information about the most recent High Availability monitoring test that was performed on the host in a cluster. If a test hasn't been performed, the status and start time in the response are null.

    ", + "DescribeBandwidthRateLimit": "

    Returns the bandwidth rate limits of a gateway. By default, these limits are not set, which means no bandwidth rate limiting is in effect. This operation is supported for the stored volume, cached volume and tape gateway types.

    This operation returns a value for a bandwidth rate limit only if the limit is set. If no limits are set for the gateway, then this operation returns only the gateway ARN in the response body. To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", "DescribeCache": "

    Returns information about the cache of a gateway. This operation is only supported in the cached volume, tape and file gateway types.

    The response includes disk IDs that are configured as cache, and it includes the amount of cache allocated and used.

    ", "DescribeCachediSCSIVolumes": "

    Returns a description of the gateway volumes specified in the request. This operation is only supported in the cached volume gateway types.

    The list of gateway volumes in the request must be from one gateway. In the response Amazon Storage Gateway returns volume information sorted by volume Amazon Resource Name (ARN).

    ", - "DescribeChapCredentials": "

    Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair.

    ", + "DescribeChapCredentials": "

    Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials information for a specified iSCSI target, one for each target-initiator pair. This operation is supported in the volume and tape gateway types.

    ", "DescribeGatewayInformation": "

    Returns metadata about a gateway such as its name, network interfaces, configured time zone, and the state (whether the gateway is running or not). To specify which gateway to describe, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", "DescribeMaintenanceStartTime": "

    Returns your gateway's weekly maintenance start time including the day and time of the week. Note that values are in terms of the gateway's time zone.

    ", "DescribeNFSFileShares": "

    Gets a description for one or more Network File System (NFS) file shares from a file gateway. This operation is only supported for file gateways.

    ", @@ -44,29 +45,30 @@ "DescribeUploadBuffer": "

    Returns information about the upload buffer of a gateway. This operation is supported for the stored volume, cached volume and tape gateway types.

    The response includes disk IDs that are configured as upload buffer space, and it includes the amount of upload buffer space allocated and used.

    ", "DescribeVTLDevices": "

    Returns a description of virtual tape library (VTL) devices for the specified tape gateway. In the response, AWS Storage Gateway returns VTL device information.

    This operation is only supported in the tape gateway type.

    ", "DescribeWorkingStorage": "

    Returns information about the working storage of a gateway. This operation is only supported in the stored volumes gateway type. This operation is deprecated in cached volumes API version (20120630). Use DescribeUploadBuffer instead.

    Working storage is also referred to as upload buffer. You can also use the DescribeUploadBuffer operation to add upload buffer to a stored volume gateway.

    The response includes disk IDs that are configured as working storage, and it includes the amount of working storage allocated and used.

    ", - "DetachVolume": "

    Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance.

    ", + "DetachVolume": "

    Disconnects a volume from an iSCSI connection and then detaches the volume from the specified gateway. Detaching and attaching a volume enables you to recover your data from one gateway to a different gateway without creating a snapshot. It also makes it easier to move your volumes from an on-premises gateway to a gateway hosted on an Amazon EC2 instance. This operation is only supported in the volume gateway type.

    ", "DisableGateway": "

    Disables a tape gateway when the gateway is no longer functioning. For example, if your gateway VM is damaged, you can disable the gateway so you can recover virtual tapes.

    Use this operation for a tape gateway that is not reachable or not functioning. This operation is only supported in the tape gateway type.

    Once a gateway is disabled it cannot be enabled.

    ", "JoinDomain": "

    Adds a file gateway to an Active Directory domain. This operation is only supported for file gateways that support the SMB file protocol.

    ", "ListFileShares": "

    Gets a list of the file shares for a specific file gateway, or the list of file shares that belong to the calling user account. This operation is only supported for file gateways.

    ", "ListGateways": "

    Lists gateways owned by an AWS account in an AWS Region specified in the request. The returned list is ordered by gateway Amazon Resource Name (ARN).

    By default, the operation returns a maximum of 100 gateways. This operation supports pagination that allows you to optionally reduce the number of gateways returned in a response.

    If you have more gateways than are returned in a response (that is, the response returns only a truncated list of your gateways), the response contains a marker that you can specify in your next request to fetch the next page of gateways.

    ", "ListLocalDisks": "

    Returns a list of the gateway's local disks. To specify which gateway to describe, you use the Amazon Resource Name (ARN) of the gateway in the body of the request.

    The request returns a list of all disks, specifying which are configured as working storage, cache storage, or stored volume or not configured at all. The response includes a DiskStatus field. This field can have a value of present (the disk is available to use), missing (the disk is no longer connected to the gateway), or mismatch (the disk node is occupied by a disk that has incorrect metadata or the disk content is corrupted).

    ", - "ListTagsForResource": "

    Lists the tags that have been added to the specified resource. This operation is only supported in the cached volume, stored volume and tape gateway type.

    ", + "ListTagsForResource": "

    Lists the tags that have been added to the specified resource. This operation is supported in storage gateways of all types.

    ", "ListTapes": "

    Lists virtual tapes in your virtual tape library (VTL) and your virtual tape shelf (VTS). You specify the tapes to list by specifying one or more tape Amazon Resource Names (ARNs). If you don't specify a tape ARN, the operation lists all virtual tapes in both your VTL and VTS.

    This operation supports pagination. By default, the operation returns a maximum of up to 100 tapes. You can optionally specify the Limit parameter in the body to limit the number of tapes in the response. If the number of tapes returned in the response is truncated, the response includes a Marker element that you can use in your subsequent request to retrieve the next set of tapes. This operation is only supported in the tape gateway type.

    ", "ListVolumeInitiators": "

    Lists iSCSI initiators that are connected to a volume. You can use this operation to determine whether a volume is being used or not. This operation is only supported in the cached volume and stored volume gateway types.

    ", "ListVolumeRecoveryPoints": "

    Lists the recovery points for a specified gateway. This operation is only supported in the cached volume gateway type.

    Each cache volume has one recovery point. A volume recovery point is a point in time at which all data of the volume is consistent and from which you can create a snapshot or clone a new cached volume from a source volume. To create a snapshot from a volume recovery point use the CreateSnapshotFromVolumeRecoveryPoint operation.

    ", "ListVolumes": "

    Lists the iSCSI stored volumes of a gateway. Results are sorted by volume ARN. The response includes only the volume ARNs. If you want additional volume information, use the DescribeStorediSCSIVolumes or the DescribeCachediSCSIVolumes API.

    The operation supports pagination. By default, the operation returns a maximum of up to 100 volumes. You can optionally specify the Limit field in the body to limit the number of volumes in the response. If the number of volumes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of volumes. This operation is only supported in the cached volume and stored volume gateway types.

    ", "NotifyWhenUploaded": "

    Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

    AWS Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or AWS Lambda function. This operation is only supported for file gateways.

    For more information, see Getting File Upload Notification in the Storage Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification).

    ", - "RefreshCache": "

    Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.

    When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through an CloudWatch event when your RefreshCache operation completes.

    ", - "RemoveTagsFromResource": "

    Removes one or more tags from the specified resource. This operation is only supported in the cached volume, stored volume and tape gateway types.

    ", + "RefreshCache": "

    Refreshes the cache for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed or replaced since the gateway last listed the bucket's contents and cached the results. This operation is only supported in the file gateway type. You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting Notified About File Operations.

    When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

    Throttle limit: This API is asynchronous so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting Notified About File Operations.

    If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

    For more information, see \"https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification\".
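A minimal sketch of initiating a refresh while respecting the two-request throttle described above; the file share ARN is a placeholder and the call uses the v0.x Request/Send pattern assumed in the other examples:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := storagegateway.New(cfg)

	// Kick off an asynchronous refresh of the whole share. The gateway accepts
	// at most two in-flight refreshes, so wait for the refresh-complete
	// CloudWatch event before issuing more requests.
	req := svc.RefreshCacheRequest(&storagegateway.RefreshCacheInput{
		FileShareARN: aws.String("arn:aws:storagegateway:us-east-1:123456789012:share/share-EXAMPLE"),
		FolderList:   []string{"/"},
		Recursive:    aws.Bool(true),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		// Exceeding the throttle surfaces as an InvalidGatewayRequestException.
		log.Fatal(err)
	}
	fmt.Println("refresh started for:", *resp.FileShareARN)
}
```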

    ", + "RemoveTagsFromResource": "

    Removes one or more tags from the specified resource. This operation is supported in storage gateways of all types.

    ", "ResetCache": "

    Resets all cache disks that have encountered an error and makes the disks available for reconfiguration as cache storage. If your cache disk encounters an error, the gateway prevents read and write operations on virtual tapes in the gateway. For example, an error can occur when a disk is corrupted or removed from the gateway. When a cache is reset, the gateway loses its cache storage. At this point you can reconfigure the disks as cache disks. This operation is only supported in the cached volume and tape types.

    If the cache disk you are resetting contains data that has not been uploaded to Amazon S3 yet, that data can be lost. After you reset cache disks, there will be no configured cache disks left in the gateway, so you must configure at least one new cache disk for your gateway to function properly.

    ", "RetrieveTapeArchive": "

    Retrieves an archived virtual tape from the virtual tape shelf (VTS) to a tape gateway. Virtual tapes archived in the VTS are not associated with any gateway. However after a tape is retrieved, it is associated with a gateway, even though it is also listed in the VTS, that is, archive. This operation is only supported in the tape gateway type.

    Once a tape is successfully retrieved to a gateway, it cannot be retrieved again to another gateway. You must archive the tape again before you can retrieve it to another gateway. This operation is only supported in the tape gateway type.

    ", "RetrieveTapeRecoveryPoint": "

    Retrieves the recovery point for the specified virtual tape. This operation is only supported in the tape gateway type.

    A recovery point is a point in time view of a virtual tape at which all the data on the tape is consistent. If your gateway crashes, virtual tapes that have recovery points can be recovered to a new gateway.

    The virtual tape can be retrieved to only one gateway. The retrieved tape is read-only. The virtual tape can be retrieved to only a tape gateway. There is no charge for retrieving recovery points.

    ", "SetLocalConsolePassword": "

    Sets the password for your VM local console. When you log in to the local console for the first time, you log in to the VM with the default credentials. We recommend that you set a new password. You don't need to know the default password to set a new password.

    ", "SetSMBGuestPassword": "

    Sets the password for the guest user smbguest. The smbguest user is the user when the authentication method for the file share is set to GuestAccess.

    ", "ShutdownGateway": "

    Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

    The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

    If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

    After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

    When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

    If you do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

    ", + "StartAvailabilityMonitorTest": "

    Start a test that verifies that the specified gateway is configured for High Availability monitoring in your host environment. This request only initiates the test; a successful response only indicates that the test was started, not that it passed. For the status of the test, invoke the DescribeAvailabilityMonitorTest API.

    Starting this test will cause your gateway to go offline for a brief period.
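A hedged sketch of starting the test and polling DescribeAvailabilityMonitorTest until the status leaves PENDING. The status values come from the AvailabilityMonitorTestStatus enum added in this revision and are assumed here to surface as a string-backed type; the gateway ARN and polling interval are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := storagegateway.New(cfg)
	gatewayARN := aws.String("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-EXAMPLE")

	// Start the HA test; the gateway goes briefly offline while it runs.
	startReq := svc.StartAvailabilityMonitorTestRequest(&storagegateway.StartAvailabilityMonitorTestInput{
		GatewayARN: gatewayARN,
	})
	if _, err := startReq.Send(context.Background()); err != nil {
		log.Fatal(err)
	}

	// Poll until the status is no longer PENDING (the other enum values in
	// this model are COMPLETE and FAILED).
	for {
		descReq := svc.DescribeAvailabilityMonitorTestRequest(&storagegateway.DescribeAvailabilityMonitorTestInput{
			GatewayARN: gatewayARN,
		})
		resp, err := descReq.Send(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		if string(resp.Status) != "PENDING" {
			fmt.Println("availability monitor test finished with status:", resp.Status)
			return
		}
		time.Sleep(30 * time.Second)
	}
}
```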

    ", "StartGateway": "

    Starts a gateway that you previously shut down (see ShutdownGateway). After the gateway starts, you can then make other API calls, your applications can read from or write to the gateway's storage volumes and you will be able to take snapshot backups.

    When you make a request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to be ready. You should call DescribeGatewayInformation and check the status before making any additional API calls. For more information, see ActivateGateway.

    To specify which gateway to start, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", - "UpdateBandwidthRateLimit": "

    Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains.

    By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

    To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", - "UpdateChapCredentials": "

    Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it.

    When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.

    ", + "UpdateBandwidthRateLimit": "

    Updates the bandwidth rate limits of a gateway. You can update both the upload and download bandwidth rate limit or specify only one of the two. If you don't set a bandwidth rate limit, the existing rate limit remains. This operation is supported for the stored volume, cached volume and tape gateway types.

    By default, a gateway's bandwidth rate limits are not set. If you don't set any limit, the gateway does not have any limitations on its bandwidth usage and could potentially use the maximum available bandwidth.

    To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    ", + "UpdateChapCredentials": "

    Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials for a specified iSCSI target. By default, a gateway does not have CHAP enabled; however, for added security, you might use it. This operation is supported in the volume and tape gateway types.

    When you update CHAP credentials, all existing connections on the target are closed and initiators must reconnect with the new credentials.

    ", "UpdateGatewayInformation": "

    Updates a gateway's metadata, which includes the gateway's name and time zone. To specify which gateway to update, use the Amazon Resource Name (ARN) of the gateway in your request.

    For Gateways activated after September 2, 2015, the gateway's ARN contains the gateway ID rather than the gateway name. However, changing the name of the gateway has no effect on the gateway's ARN.

    ", "UpdateGatewaySoftwareNow": "

    Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.

    When you make this request, you get a 200 OK success response immediately. However, it might take some time for the update to complete. You can call DescribeGatewayInformation to verify the gateway is in the STATE_RUNNING state.

    A software update forces a system restart of your gateway. You can minimize the chance of any disruption to your applications by increasing your iSCSI Initiators' timeouts. For more information about increasing iSCSI Initiator timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings and Customizing Your Linux iSCSI Settings, respectively.

    ", "UpdateMaintenanceStartTime": "

    Updates a gateway's weekly maintenance start time information, including day and time of the week. The maintenance time is the time in your gateway's time zone.

    ", @@ -93,6 +95,13 @@ "ActivateGatewayInput$ActivationKey": "

    Your gateway activation key. You can obtain the activation key by sending an HTTP GET request with redirects enabled to the gateway IP address (port 80). The redirect URL returned in the response provides you the activation key for your gateway in the query string parameter activationKey. It may also include other activation-related parameters, however, these are merely defaults -- the arguments you pass to the ActivateGateway API call determine the actual configuration of your gateway.

    For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html in the Storage Gateway User Guide.

    " } }, + "ActiveDirectoryStatus": { + "base": null, + "refs": { + "DescribeSMBSettingsOutput$ActiveDirectoryStatus": "

    Indicates the status of a gateway that is a member of the Active Directory domain.

    • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

    • DETACHED: Indicates that the gateway is not joined to a domain.

    • JOINED: Indicates that the gateway has successfully joined a domain.

    • JOINING: Indicates that a JoinDomain operation is in progress.

    • NETWORK_ERROR: Indicates that the JoinDomain operation failed due to a network or connectivity error.

    • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

    • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.

    ", + "JoinDomainOutput$ActiveDirectoryStatus": "

    Indicates the status of the gateway as a member of the Active Directory domain.

    • ACCESS_DENIED: Indicates that the JoinDomain operation failed due to an authentication error.

    • DETACHED: Indicates that the gateway is not joined to a domain.

    • JOINED: Indicates that the gateway has successfully joined a domain.

    • JOINING: Indicates that a JoinDomain operation is in progress.

    • NETWORK_ERROR: Indicates that the JoinDomain operation failed due to a network or connectivity error.

    • TIMEOUT: Indicates that the JoinDomain operation failed because the operation didn't complete within the allotted time.

    • UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another type of error.
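A sketch of a JoinDomain call that sets the new TimeoutInSeconds member (0-3600 seconds in this revision) and inspects the returned ActiveDirectoryStatus; the gateway ARN, domain, and credentials are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := storagegateway.New(cfg)

	// Join the gateway to a domain, giving the operation up to 10 minutes.
	// All identifiers and credentials below are placeholders.
	req := svc.JoinDomainRequest(&storagegateway.JoinDomainInput{
		GatewayARN:       aws.String("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-EXAMPLE"),
		DomainName:       aws.String("corp.example.com"),
		UserName:         aws.String("domain-join-svc"),
		Password:         aws.String("REPLACE_ME"),
		TimeoutInSeconds: aws.Int64(600),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// JOINED means success; JOINING, TIMEOUT, NETWORK_ERROR, and the other
	// values map to the bullet list above.
	fmt.Println("ActiveDirectoryStatus:", resp.ActiveDirectoryStatus)
}
```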

    " + } + }, "AddCacheInput": { "base": null, "refs": { @@ -160,6 +169,12 @@ "SMBFileShareInfo$Authentication": null } }, + "AvailabilityMonitorTestStatus": { + "base": null, + "refs": { + "DescribeAvailabilityMonitorTestOutput$Status": "

    The status of the High Availability monitoring test. If a test hasn't been performed, the value of this field is null.
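    A rough sketch of starting the new High Availability monitoring test and reading back its status and start time, assuming the SDK's request/Send pattern; the gateway ARN is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := storagegateway.New(cfg)
	arn := aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12345678") // placeholder

	if _, err := svc.StartAvailabilityMonitorTestRequest(&storagegateway.StartAvailabilityMonitorTestInput{
		GatewayARN: arn,
	}).Send(context.TODO()); err != nil {
		log.Fatal(err)
	}

	out, err := svc.DescribeAvailabilityMonitorTestRequest(&storagegateway.DescribeAvailabilityMonitorTestInput{
		GatewayARN: arn,
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Status and StartTime are empty until a test has actually been performed.
	fmt.Printf("status: %v, started: %v\n", out.Status, out.StartTime)
}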

    " + } + }, "BandwidthDownloadRateLimit": { "base": null, "refs": { @@ -282,7 +297,7 @@ "CloudWatchLogGroupARN": { "base": null, "refs": { - "DescribeGatewayInformationOutput$CloudWatchLogGroupARN": "

    The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to monitor and log events in the gateway.

    ", + "DescribeGatewayInformationOutput$CloudWatchLogGroupARN": "

    The Amazon Resource Name (ARN) of the Amazon CloudWatch Log Group that is used to monitor events in the gateway.

    ", "UpdateGatewayInformationInput$CloudWatchLogGroupARN": "

    The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you want to use to monitor and log events in the gateway.

    For more information, see What Is Amazon CloudWatch Logs?.

    " } }, @@ -467,6 +482,16 @@ "refs": { } }, + "DescribeAvailabilityMonitorTestInput": { + "base": null, + "refs": { + } + }, + "DescribeAvailabilityMonitorTestOutput": { + "base": null, + "refs": { + } + }, "DescribeBandwidthRateLimitInput": { "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway.

    ", "refs": { @@ -737,7 +762,7 @@ "DomainUserName": { "base": null, "refs": { - "JoinDomainInput$UserName": "

    Sets the user name of user who has permission to add the gateway to the Active Directory domain.

    " + "JoinDomainInput$UserName": "

    Sets the user name of the user who has permission to add the gateway to the Active Directory domain. The domain user account should be enabled to join computers to the domain. For example, you can use the domain administrator account or an account with delegated permissions to join computers to the domain.

    " } }, "DomainUserPassword": { @@ -900,6 +925,8 @@ "DeleteGatewayInput$GatewayARN": null, "DeleteGatewayOutput$GatewayARN": null, "DeleteTapeInput$GatewayARN": "

    The unique Amazon Resource Name (ARN) of the gateway that the virtual tape to delete is associated with. Use the ListGateways operation to return a list of gateways for your account and AWS Region.

    ", + "DescribeAvailabilityMonitorTestInput$GatewayARN": null, + "DescribeAvailabilityMonitorTestOutput$GatewayARN": null, "DescribeBandwidthRateLimitInput$GatewayARN": null, "DescribeBandwidthRateLimitOutput$GatewayARN": null, "DescribeCacheInput$GatewayARN": null, @@ -944,6 +971,8 @@ "SetSMBGuestPasswordOutput$GatewayARN": null, "ShutdownGatewayInput$GatewayARN": null, "ShutdownGatewayOutput$GatewayARN": null, + "StartAvailabilityMonitorTestInput$GatewayARN": null, + "StartAvailabilityMonitorTestOutput$GatewayARN": null, "StartGatewayInput$GatewayARN": null, "StartGatewayOutput$GatewayARN": null, "TapeArchive$RetrievedTo": "

    The Amazon Resource Name (ARN) of the tape gateway that the virtual tape is being retrieved to.

    The virtual tape is retrieved from the virtual tape shelf (VTS).

    ", @@ -1030,6 +1059,12 @@ "Hosts$member": null } }, + "HostEnvironment": { + "base": null, + "refs": { + "DescribeGatewayInformationOutput$HostEnvironment": "

    The type of hypervisor environment used by the host.

    " + } + }, "Hosts": { "base": null, "refs": { @@ -1548,6 +1583,16 @@ "UpdateNFSFileShareInput$Squash": "

    The user mapped to the anonymous user. Valid options are the following:

    • RootSquash - Only root is mapped to the anonymous user.

    • NoSquash - No one is mapped to the anonymous user.

    • AllSquash - Everyone is mapped to the anonymous user.

    " } }, + "StartAvailabilityMonitorTestInput": { + "base": null, + "refs": { + } + }, + "StartAvailabilityMonitorTestOutput": { + "base": null, + "refs": { + } + }, "StartGatewayInput": { "base": "

    A JSON object containing the Amazon Resource Name (ARN) of the gateway to start.

    ", "refs": { @@ -1804,12 +1849,19 @@ "Time": { "base": null, "refs": { + "DescribeAvailabilityMonitorTestOutput$StartTime": "

    The time the High Availability monitoring test was started. If a test hasn't been performed, the value of this field is null.

    ", "Tape$TapeCreatedDate": "

    The date the virtual tape was created.

    ", "TapeArchive$TapeCreatedDate": "

    The date the virtual tape was created.

    ", "TapeArchive$CompletionTime": "

    The time that the archiving of the virtual tape was completed.

    The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    ", "TapeRecoveryPointInfo$TapeRecoveryPointTime": "

    The time when the point-in-time view of the virtual tape was replicated for later recovery.

    The default time stamp format of the tape recovery point time is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' format.

    " } }, + "TimeoutInSeconds": { + "base": null, + "refs": { + "JoinDomainInput$TimeoutInSeconds": "

    Specifies the time, in seconds, in which the JoinDomain operation must complete. The default is 20 seconds.

    " + } + }, "UpdateBandwidthRateLimitInput": { "base": "

    A JSON object containing one or more of the following fields:

    ", "refs": { diff --git a/models/apis/transcribe/2017-10-26/api-2.json b/models/apis/transcribe/2017-10-26/api-2.json index 05a11e1ca9c..d5c85f1bf41 100644 --- a/models/apis/transcribe/2017-10-26/api-2.json +++ b/models/apis/transcribe/2017-10-26/api-2.json @@ -315,6 +315,11 @@ "Vocabularies":{"shape":"Vocabularies"} } }, + "MaxAlternatives":{ + "type":"integer", + "max":10, + "min":2 + }, "MaxResults":{ "type":"integer", "max":100, @@ -385,7 +390,9 @@ "VocabularyName":{"shape":"VocabularyName"}, "ShowSpeakerLabels":{"shape":"Boolean"}, "MaxSpeakerLabels":{"shape":"MaxSpeakers"}, - "ChannelIdentification":{"shape":"Boolean"} + "ChannelIdentification":{"shape":"Boolean"}, + "ShowAlternatives":{"shape":"Boolean"}, + "MaxAlternatives":{"shape":"MaxAlternatives"} } }, "StartTranscriptionJobRequest":{ diff --git a/models/apis/transcribe/2017-10-26/docs-2.json b/models/apis/transcribe/2017-10-26/docs-2.json index ee5527c74ca..f060d062f8f 100644 --- a/models/apis/transcribe/2017-10-26/docs-2.json +++ b/models/apis/transcribe/2017-10-26/docs-2.json @@ -22,7 +22,8 @@ "base": null, "refs": { "Settings$ShowSpeakerLabels": "

    Determines whether the transcription job uses speaker recognition to identify different speakers in the input audio. Speaker recognition labels individual speakers in the audio file. If you set the ShowSpeakerLabels field to true, you must also set the maximum number of speaker labels in the MaxSpeakerLabels field.

    You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

    ", - "Settings$ChannelIdentification": "

    Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

    Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item and alternative transcriptions of the item including the confidence that Amazon Transcribe has in the transcription.

    You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

    " + "Settings$ChannelIdentification": "

    Instructs Amazon Transcribe to process each audio channel separately and then merge the transcription output of each channel into a single transcription.

    Amazon Transcribe also produces a transcription of each item detected on an audio channel, including the start time and end time of the item, as well as alternative transcriptions of the item and the confidence that Amazon Transcribe has in the transcription.

    You can't set both ShowSpeakerLabels and ChannelIdentification in the same request. If you set both, your request returns a BadRequestException.

    ", + "Settings$ShowAlternatives": "

    Determines whether the transcription contains alternative transcriptions. If you set the ShowAlternatives field to true, you must also set the maximum number of alternatives to return in the MaxAlternatives field.

    " } }, "ConflictException": { @@ -143,6 +144,12 @@ "refs": { } }, + "MaxAlternatives": { + "base": null, + "refs": { + "Settings$MaxAlternatives": "

    The number of alternative transcriptions that the service should return. If you specify the MaxAlternatives field, you must set the ShowAlternatives field to true.
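    A rough sketch of a StartTranscriptionJob request that opts in to alternative transcriptions, assuming the generated transcribe client and its typed string enums along with the SDK's request/Send pattern; the job name, S3 URI, language code, and media format are placeholders, and MaxAlternatives must be between 2 and 10:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/transcribe"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := transcribe.New(cfg)

	out, err := svc.StartTranscriptionJobRequest(&transcribe.StartTranscriptionJobInput{
		TranscriptionJobName: aws.String("example-job"),        // placeholder
		LanguageCode:         transcribe.LanguageCode("en-US"), // placeholder
		MediaFormat:          transcribe.MediaFormat("mp3"),    // placeholder
		Media: &transcribe.Media{
			MediaFileUri: aws.String("s3://my-bucket/audio.mp3"), // placeholder
		},
		Settings: &transcribe.Settings{
			ShowAlternatives: aws.Bool(true), // required to be true when MaxAlternatives is set
			MaxAlternatives:  aws.Int64(3),   // valid range: 2-10
		},
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}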

    " + } + }, "MaxResults": { "base": null, "refs": { diff --git a/models/apis/workspaces/2015-04-08/api-2.json b/models/apis/workspaces/2015-04-08/api-2.json index 7690bfc055e..e8d44a79622 100644 --- a/models/apis/workspaces/2015-04-08/api-2.json +++ b/models/apis/workspaces/2015-04-08/api-2.json @@ -148,6 +148,22 @@ {"shape":"AccessDeniedException"} ] }, + "DeregisterWorkspaceDirectory":{ + "name":"DeregisterWorkspaceDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeregisterWorkspaceDirectoryRequest"}, + "output":{"shape":"DeregisterWorkspaceDirectoryResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidResourceStateException"} + ] + }, "DescribeAccount":{ "name":"DescribeAccount", "http":{ @@ -361,6 +377,47 @@ {"shape":"AccessDeniedException"} ] }, + "ModifySelfservicePermissions":{ + "name":"ModifySelfservicePermissions", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifySelfservicePermissionsRequest"}, + "output":{"shape":"ModifySelfservicePermissionsResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"} + ] + }, + "ModifyWorkspaceAccessProperties":{ + "name":"ModifyWorkspaceAccessProperties", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyWorkspaceAccessPropertiesRequest"}, + "output":{"shape":"ModifyWorkspaceAccessPropertiesResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ] + }, + "ModifyWorkspaceCreationProperties":{ + "name":"ModifyWorkspaceCreationProperties", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyWorkspaceCreationPropertiesRequest"}, + "output":{"shape":"ModifyWorkspaceCreationPropertiesResult"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ModifyWorkspaceProperties":{ "name":"ModifyWorkspaceProperties", "http":{ @@ -411,6 +468,25 @@ "input":{"shape":"RebuildWorkspacesRequest"}, "output":{"shape":"RebuildWorkspacesResult"} }, + "RegisterWorkspaceDirectory":{ + "name":"RegisterWorkspaceDirectory", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RegisterWorkspaceDirectoryRequest"}, + "output":{"shape":"RegisterWorkspaceDirectoryResult"}, + "errors":[ + {"shape":"InvalidParameterValuesException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceLimitExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"WorkspacesDefaultRoleNotFoundException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"UnsupportedNetworkConfigurationException"}, + {"shape":"OperationNotSupportedException"} + ] + }, "RestoreWorkspace":{ "name":"RestoreWorkspace", "http":{ @@ -496,6 +572,13 @@ }, "exception":true }, + "AccessPropertyValue":{ + "type":"string", + "enum":[ + "ALLOW", + "DENY" + ] + }, "AccountModification":{ "type":"structure", "members":{ @@ -706,7 +789,8 @@ "EnableInternetAccess":{"shape":"BooleanObject"}, "DefaultOu":{"shape":"DefaultOu"}, "CustomSecurityGroupId":{"shape":"SecurityGroupId"}, - "UserEnabledAsLocalAdministrator":{"shape":"BooleanObject"} + "UserEnabledAsLocalAdministrator":{"shape":"BooleanObject"}, + "EnableMaintenanceMode":{"shape":"BooleanObject"} } }, 
"DeleteIpGroupRequest":{ @@ -749,6 +833,18 @@ "members":{ } }, + "DeregisterWorkspaceDirectoryRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{"shape":"DirectoryId"} + } + }, + "DeregisterWorkspaceDirectoryResult":{ + "type":"structure", + "members":{ + } + }, "DescribeAccountModificationsRequest":{ "type":"structure", "members":{ @@ -834,6 +930,7 @@ "type":"structure", "members":{ "DirectoryIds":{"shape":"DirectoryIdList"}, + "Limit":{"shape":"Limit"}, "NextToken":{"shape":"PaginationToken"} } }, @@ -908,6 +1005,8 @@ "Description":{"type":"string"}, "DirectoryId":{ "type":"string", + "max":65, + "min":10, "pattern":"^d-[0-9a-f]{8,63}$" }, "DirectoryIdList":{ @@ -1135,6 +1234,54 @@ "members":{ } }, + "ModifySelfservicePermissionsRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "SelfservicePermissions" + ], + "members":{ + "ResourceId":{"shape":"DirectoryId"}, + "SelfservicePermissions":{"shape":"SelfservicePermissions"} + } + }, + "ModifySelfservicePermissionsResult":{ + "type":"structure", + "members":{ + } + }, + "ModifyWorkspaceAccessPropertiesRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "WorkspaceAccessProperties" + ], + "members":{ + "ResourceId":{"shape":"DirectoryId"}, + "WorkspaceAccessProperties":{"shape":"WorkspaceAccessProperties"} + } + }, + "ModifyWorkspaceAccessPropertiesResult":{ + "type":"structure", + "members":{ + } + }, + "ModifyWorkspaceCreationPropertiesRequest":{ + "type":"structure", + "required":[ + "ResourceId", + "WorkspaceCreationProperties" + ], + "members":{ + "ResourceId":{"shape":"DirectoryId"}, + "WorkspaceCreationProperties":{"shape":"WorkspaceCreationProperties"} + } + }, + "ModifyWorkspaceCreationPropertiesResult":{ + "type":"structure", + "members":{ + } + }, "ModifyWorkspacePropertiesRequest":{ "type":"structure", "required":[ @@ -1268,6 +1415,26 @@ "min":1, "pattern":"^[-0-9a-z]{1,31}$" }, + "RegisterWorkspaceDirectoryRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "EnableWorkDocs" + ], + "members":{ + "DirectoryId":{"shape":"DirectoryId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "EnableWorkDocs":{"shape":"BooleanObject"}, + "EnableSelfService":{"shape":"BooleanObject"}, + "Tenancy":{"shape":"Tenancy"}, + "Tags":{"shape":"TagList"} + } + }, + "RegisterWorkspaceDirectoryResult":{ + "type":"structure", + "members":{ + } + }, "RegistrationCode":{ "type":"string", "max":20, @@ -1368,7 +1535,19 @@ "RunningModeAutoStopTimeoutInMinutes":{"type":"integer"}, "SecurityGroupId":{ "type":"string", - "pattern":"^(sg-[0-9a-f]{8})$" + "max":20, + "min":11, + "pattern":"^(sg-([0-9a-f]{8}|[0-9a-f]{17}))$" + }, + "SelfservicePermissions":{ + "type":"structure", + "members":{ + "RestartWorkspace":{"shape":"ReconnectEnum"}, + "IncreaseVolumeSize":{"shape":"ReconnectEnum"}, + "ChangeComputeType":{"shape":"ReconnectEnum"}, + "SwitchRunningMode":{"shape":"ReconnectEnum"}, + "RebuildWorkspace":{"shape":"ReconnectEnum"} + } }, "Snapshot":{ "type":"structure", @@ -1432,11 +1611,14 @@ }, "SubnetId":{ "type":"string", - "pattern":"^(subnet-[0-9a-f]{8})$" + "max":24, + "min":15, + "pattern":"^(subnet-([0-9a-f]{8}|[0-9a-f]{17}))$" }, "SubnetIds":{ "type":"list", - "member":{"shape":"SubnetId"} + "member":{"shape":"SubnetId"}, + "max":2 }, "Tag":{ "type":"structure", @@ -1470,6 +1652,13 @@ "ADMIN_MAINTENANCE" ] }, + "Tenancy":{ + "type":"string", + "enum":[ + "DEDICATED", + "SHARED" + ] + }, "TerminateRequest":{ "type":"structure", "required":["WorkspaceId"], @@ -1497,6 +1686,13 @@ } 
}, "Timestamp":{"type":"timestamp"}, + "UnsupportedNetworkConfigurationException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "UnsupportedWorkspaceConfigurationException":{ "type":"structure", "members":{ @@ -1553,6 +1749,18 @@ "ModificationStates":{"shape":"ModificationStateList"} } }, + "WorkspaceAccessProperties":{ + "type":"structure", + "members":{ + "DeviceTypeWindows":{"shape":"AccessPropertyValue"}, + "DeviceTypeOsx":{"shape":"AccessPropertyValue"}, + "DeviceTypeWeb":{"shape":"AccessPropertyValue"}, + "DeviceTypeIos":{"shape":"AccessPropertyValue"}, + "DeviceTypeAndroid":{"shape":"AccessPropertyValue"}, + "DeviceTypeChromeOs":{"shape":"AccessPropertyValue"}, + "DeviceTypeZeroClient":{"shape":"AccessPropertyValue"} + } + }, "WorkspaceBundle":{ "type":"structure", "members":{ @@ -1578,6 +1786,16 @@ "type":"list", "member":{"shape":"WorkspaceConnectionStatus"} }, + "WorkspaceCreationProperties":{ + "type":"structure", + "members":{ + "EnableInternetAccess":{"shape":"BooleanObject"}, + "DefaultOu":{"shape":"DefaultOu"}, + "CustomSecurityGroupId":{"shape":"SecurityGroupId"}, + "UserEnabledAsLocalAdministrator":{"shape":"BooleanObject"}, + "EnableMaintenanceMode":{"shape":"BooleanObject"} + } + }, "WorkspaceDirectory":{ "type":"structure", "members":{ @@ -1593,7 +1811,10 @@ "WorkspaceSecurityGroupId":{"shape":"SecurityGroupId"}, "State":{"shape":"WorkspaceDirectoryState"}, "WorkspaceCreationProperties":{"shape":"DefaultWorkspaceCreationProperties"}, - "ipGroupIds":{"shape":"IpGroupIdList"} + "ipGroupIds":{"shape":"IpGroupIdList"}, + "WorkspaceAccessProperties":{"shape":"WorkspaceAccessProperties"}, + "Tenancy":{"shape":"Tenancy"}, + "SelfservicePermissions":{"shape":"SelfservicePermissions"} } }, "WorkspaceDirectoryState":{ @@ -1747,6 +1968,13 @@ "ERROR" ] }, + "WorkspacesDefaultRoleNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"ExceptionMessage"} + }, + "exception":true + }, "WorkspacesIpGroup":{ "type":"structure", "members":{ diff --git a/models/apis/workspaces/2015-04-08/docs-2.json b/models/apis/workspaces/2015-04-08/docs-2.json index f4c5b6b6354..e2896a6450b 100644 --- a/models/apis/workspaces/2015-04-08/docs-2.json +++ b/models/apis/workspaces/2015-04-08/docs-2.json @@ -11,26 +11,31 @@ "DeleteIpGroup": "

    Deletes the specified IP access control group.

    You cannot delete an IP access control group that is associated with a directory.

    ", "DeleteTags": "

    Deletes the specified tags from the specified WorkSpaces resource.

    ", "DeleteWorkspaceImage": "

    Deletes the specified image from your account. To delete an image, you must first delete any bundles that are associated with the image and un-share the image if it is shared with other accounts.

    ", - "DescribeAccount": "

    Retrieves a list that describes the configuration of bring your own license (BYOL) for the specified account.

    ", - "DescribeAccountModifications": "

    Retrieves a list that describes modifications to the configuration of bring your own license (BYOL) for the specified account.

    ", + "DeregisterWorkspaceDirectory": "

    Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory.

    ", + "DescribeAccount": "

    Retrieves a list that describes the configuration of Bring Your Own License (BYOL) for the specified account.

    ", + "DescribeAccountModifications": "

    Retrieves a list that describes modifications to the configuration of Bring Your Own License (BYOL) for the specified account.

    ", "DescribeClientProperties": "

    Retrieves a list that describes one or more specified Amazon WorkSpaces clients.

    ", "DescribeIpGroups": "

    Describes one or more of your IP access control groups.

    ", "DescribeTags": "

    Describes the specified tags for the specified WorkSpaces resource.

    ", "DescribeWorkspaceBundles": "

    Retrieves a list that describes the available WorkSpace bundles.

    You can filter the results using either bundle ID or owner, but not both.

    ", - "DescribeWorkspaceDirectories": "

    Describes the available AWS Directory Service directories that are registered with Amazon WorkSpaces.

    ", + "DescribeWorkspaceDirectories": "

    Describes the available directories that are registered with Amazon WorkSpaces.

    ", "DescribeWorkspaceImages": "

    Retrieves a list that describes one or more specified images, if the image identifiers are provided. Otherwise, all images in the account are described.

    ", "DescribeWorkspaceSnapshots": "

    Describes the snapshots for the specified WorkSpace.

    ", "DescribeWorkspaces": "

    Describes the specified WorkSpaces.

    You can filter the results by using the bundle identifier, directory identifier, or owner, but you can specify only one filter at a time.

    ", "DescribeWorkspacesConnectionStatus": "

    Describes the connection status of the specified WorkSpaces.

    ", "DisassociateIpGroups": "

    Disassociates the specified IP access control group from the specified directory.

    ", - "ImportWorkspaceImage": "

    Imports the specified Windows 7 or Windows 10 bring your own license (BYOL) image into Amazon WorkSpaces. The image must be an already licensed EC2 image that is in your AWS account, and you must own the image.

    ", - "ListAvailableManagementCidrRanges": "

    Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable bring your own license (BYOL).

    The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

    ", - "ModifyAccount": "

    Modifies the configuration of bring your own license (BYOL) for the specified account.

    ", + "ImportWorkspaceImage": "

    Imports the specified Windows 7 or Windows 10 Bring Your Own License (BYOL) image into Amazon WorkSpaces. The image must be an already licensed EC2 image that is in your AWS account, and you must own the image.

    ", + "ListAvailableManagementCidrRanges": "

    Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

    The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

    ", + "ModifyAccount": "

    Modifies the configuration of Bring Your Own License (BYOL) for the specified account.

    ", "ModifyClientProperties": "

    Modifies the properties of the specified Amazon WorkSpaces clients.

    ", + "ModifySelfservicePermissions": "

    Modifies the self-service WorkSpace management capabilities for your users. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.

    ", + "ModifyWorkspaceAccessProperties": "

    Specifies which devices and operating systems users can use to access their WorkSpaces. For more information, see Control Device Access.

    ", + "ModifyWorkspaceCreationProperties": "

    Modifies the default properties that are used to create WorkSpaces.

    ", "ModifyWorkspaceProperties": "

    Modifies the specified WorkSpace properties.

    ", "ModifyWorkspaceState": "

    Sets the state of the specified WorkSpace.

    To maintain a WorkSpace without being interrupted, set the WorkSpace state to ADMIN_MAINTENANCE. WorkSpaces in this state do not respond to requests to reboot, stop, start, rebuild, or restore. An AutoStop WorkSpace in this state is not stopped. Users cannot log into a WorkSpace in the ADMIN_MAINTENANCE state.

    ", "RebootWorkspaces": "

    Reboots the specified WorkSpaces.

    You cannot reboot a WorkSpace unless its state is AVAILABLE or UNHEALTHY.

    This operation is asynchronous and returns before the WorkSpaces have rebooted.

    ", "RebuildWorkspaces": "

    Rebuilds the specified WorkSpace.

    You cannot rebuild a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

    Rebuilding a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Rebuild a WorkSpace.

    This operation is asynchronous and returns before the WorkSpaces have been completely rebuilt.

    ", + "RegisterWorkspaceDirectory": "

    Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.
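    A rough sketch of registering a directory, assuming the SDK's request/Send pattern; the directory and subnet IDs are placeholders, and the Tenancy constant name follows the SDK's usual enum-naming convention rather than being confirmed here:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := workspaces.New(cfg)

	_, err = svc.RegisterWorkspaceDirectoryRequest(&workspaces.RegisterWorkspaceDirectoryInput{
		DirectoryId:       aws.String("d-0123456789"),                                                      // placeholder
		SubnetIds:         []string{"subnet-0123456789abcdef0", "subnet-0fedcba9876543210"},                // placeholders, separate AZs
		EnableWorkDocs:    aws.Bool(false),
		EnableSelfService: aws.Bool(true),
		Tenancy:           workspaces.TenancyShared, // DEDICATED is required for BYOL images
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("directory registration requested")
}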

    ", "RestoreWorkspace": "

    Restores the specified WorkSpace to its last known healthy state.

    You cannot restore a WorkSpace unless its state is AVAILABLE, ERROR, or UNHEALTHY.

    Restoring a WorkSpace is a potentially destructive action that can result in the loss of data. For more information, see Restore a WorkSpace.

    This operation is asynchronous and returns before the WorkSpace is completely restored.

    ", "RevokeIpRules": "

    Removes one or more rules from the specified IP access control group.

    ", "StartWorkspaces": "

    Starts the specified WorkSpaces.

    You cannot start a WorkSpace unless it has a running mode of AutoStop and a state of STOPPED.

    ", @@ -50,8 +55,20 @@ "refs": { } }, + "AccessPropertyValue": { + "base": null, + "refs": { + "WorkspaceAccessProperties$DeviceTypeWindows": "

    Indicates whether users can use Windows clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST. For more information, see Restrict WorkSpaces Access to Trusted Devices.

    ", + "WorkspaceAccessProperties$DeviceTypeOsx": "

    Indicates whether users can use macOS clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST. For more information, see Restrict WorkSpaces Access to Trusted Devices.

    ", + "WorkspaceAccessProperties$DeviceTypeWeb": "

    Indicates whether users can access their WorkSpaces through a web browser.

    ", + "WorkspaceAccessProperties$DeviceTypeIos": "

    Indicates whether users can use iOS devices to access their WorkSpaces.

    ", + "WorkspaceAccessProperties$DeviceTypeAndroid": "

    Indicates whether users can use Android devices to access their WorkSpaces.

    ", + "WorkspaceAccessProperties$DeviceTypeChromeOs": "

    Indicates whether users can use Chromebooks to access their WorkSpaces.

    ", + "WorkspaceAccessProperties$DeviceTypeZeroClient": "

    Indicates whether users can use zero client devices to access their WorkSpaces.
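    A rough sketch of restricting device access for a directory, assuming the SDK's request/Send pattern; the directory ID is a placeholder and the AccessPropertyValue constant names follow the SDK's usual enum-naming convention rather than being confirmed here:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := workspaces.New(cfg)

	_, err = svc.ModifyWorkspaceAccessPropertiesRequest(&workspaces.ModifyWorkspaceAccessPropertiesInput{
		ResourceId: aws.String("d-0123456789"), // placeholder directory ID
		WorkspaceAccessProperties: &workspaces.WorkspaceAccessProperties{
			DeviceTypeWindows:    workspaces.AccessPropertyValueAllow,
			DeviceTypeOsx:        workspaces.AccessPropertyValueAllow,
			DeviceTypeWeb:        workspaces.AccessPropertyValueDeny,
			DeviceTypeIos:        workspaces.AccessPropertyValueAllow,
			DeviceTypeAndroid:    workspaces.AccessPropertyValueDeny,
			DeviceTypeChromeOs:   workspaces.AccessPropertyValueAllow,
			DeviceTypeZeroClient: workspaces.AccessPropertyValueDeny,
		},
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access properties updated")
}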

    " + } + }, "AccountModification": { - "base": "

    Describes a modification to the configuration of bring your own license (BYOL) for the specified account.

    ", + "base": "

    Describes a modification to the configuration of Bring Your Own License (BYOL) for the specified account.

    ", "refs": { "AccountModificationList$member": null } @@ -93,9 +110,15 @@ "refs": { "DefaultWorkspaceCreationProperties$EnableWorkDocs": "

    Specifies whether the directory is enabled for Amazon WorkDocs.

    ", "DefaultWorkspaceCreationProperties$EnableInternetAccess": "

    Specifies whether to automatically assign a public IP address to WorkSpaces in this directory by default. If enabled, the public IP address allows outbound internet access from your WorkSpaces when you’re using an internet gateway in the Amazon VPC in which your WorkSpaces are located. If you're using a Network Address Translation (NAT) gateway for outbound internet access from your VPC, or if your WorkSpaces are in public subnets and you manually assign them Elastic IP addresses, you should disable this setting. This setting applies to new WorkSpaces that you launch or to existing WorkSpaces that you rebuild. For more information, see Configure a VPC for Amazon WorkSpaces.

    ", - "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

    Specifies whether the WorkSpace user is an administrator on the WorkSpace.

    ", + "DefaultWorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

    Specifies whether WorkSpace users are local administrators on their WorkSpaces.

    ", + "DefaultWorkspaceCreationProperties$EnableMaintenanceMode": "

    Specifies whether maintenance mode is enabled for WorkSpaces. For more information, see WorkSpace Maintenance.

    ", + "RegisterWorkspaceDirectoryRequest$EnableWorkDocs": "

    Indicates whether Amazon WorkDocs is enabled or disabled. If you have enabled this parameter and WorkDocs is not available in the Region, you will receive an OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again.

    ", + "RegisterWorkspaceDirectoryRequest$EnableSelfService": "

    Indicates whether self-service capabilities are enabled or disabled.

    ", "Workspace$UserVolumeEncryptionEnabled": "

    Indicates whether the data stored on the user volume is encrypted.

    ", "Workspace$RootVolumeEncryptionEnabled": "

    Indicates whether the data stored on the root volume is encrypted.

    ", + "WorkspaceCreationProperties$EnableInternetAccess": "

    Indicates whether internet access is enabled for your WorkSpaces.

    ", + "WorkspaceCreationProperties$UserEnabledAsLocalAdministrator": "

    Indicates whether users are local administrators of their WorkSpaces.

    ", + "WorkspaceCreationProperties$EnableMaintenanceMode": "

    Indicates whether maintenance mode is enabled for your WorkSpaces. For more information, see WorkSpace Maintenance.
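    A rough sketch of updating a directory's default creation properties, including the new EnableMaintenanceMode flag, assuming the SDK's request/Send pattern; the directory ID, OU, and security group ID are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := workspaces.New(cfg)

	_, err = svc.ModifyWorkspaceCreationPropertiesRequest(&workspaces.ModifyWorkspaceCreationPropertiesInput{
		ResourceId: aws.String("d-0123456789"), // placeholder directory ID
		WorkspaceCreationProperties: &workspaces.WorkspaceCreationProperties{
			EnableInternetAccess:            aws.Bool(true),
			EnableMaintenanceMode:           aws.Bool(true),
			UserEnabledAsLocalAdministrator: aws.Bool(false),
			DefaultOu:                       aws.String("OU=WorkSpaces,DC=corp,DC=example,DC=com"), // placeholder
			CustomSecurityGroupId:           aws.String("sg-0123456789abcdef0"),                    // placeholder
		},
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("workspace creation properties updated")
}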

    ", "WorkspaceRequest$UserVolumeEncryptionEnabled": "

    Indicates whether the data stored on the user volume is encrypted.

    ", "WorkspaceRequest$RootVolumeEncryptionEnabled": "

    Indicates whether the data stored on the root volume is encrypted.

    " } @@ -250,11 +273,12 @@ "DefaultOu": { "base": null, "refs": { - "DefaultWorkspaceCreationProperties$DefaultOu": "

    The organizational unit (OU) in the directory for the WorkSpace machine accounts.

    " + "DefaultWorkspaceCreationProperties$DefaultOu": "

    The organizational unit (OU) in the directory for the WorkSpace machine accounts.

    ", + "WorkspaceCreationProperties$DefaultOu": "

    The default organizational unit (OU) for your WorkSpace directories.

    " } }, "DefaultWorkspaceCreationProperties": { - "base": "

    Describes the default values used to create a WorkSpace.

    ", + "base": "

    Describes the default values that are used to create WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.

    ", "refs": { "WorkspaceDirectory$WorkspaceCreationProperties": "

    The default creation properties for all WorkSpaces in the directory.

    " } @@ -289,6 +313,16 @@ "refs": { } }, + "DeregisterWorkspaceDirectoryRequest": { + "base": null, + "refs": { + } + }, + "DeregisterWorkspaceDirectoryResult": { + "base": null, + "refs": { + } + }, "DescribeAccountModificationsRequest": { "base": null, "refs": { @@ -414,9 +448,14 @@ "base": null, "refs": { "AssociateIpGroupsRequest$DirectoryId": "

    The identifier of the directory.

    ", + "DeregisterWorkspaceDirectoryRequest$DirectoryId": "

    The identifier of the directory. If any WorkSpaces are registered to this directory, you must remove them before you deregister the directory, or you will receive an OperationNotSupportedException error.

    ", "DescribeWorkspacesRequest$DirectoryId": "

    The identifier of the directory. In addition, you can optionally specify a specific directory user (see UserName). You cannot combine this parameter with any other filter.

    ", "DirectoryIdList$member": null, "DisassociateIpGroupsRequest$DirectoryId": "

    The identifier of the directory.

    ", + "ModifySelfservicePermissionsRequest$ResourceId": "

    The identifier of the directory.

    ", + "ModifyWorkspaceAccessPropertiesRequest$ResourceId": "

    The identifier of the directory.

    ", + "ModifyWorkspaceCreationPropertiesRequest$ResourceId": "

    The identifier of the directory.

    ", + "RegisterWorkspaceDirectoryRequest$DirectoryId": "

    The identifier of the directory. You cannot register a directory if it does not have a status of Active. If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again.

    ", "Workspace$DirectoryId": "

    The identifier of the AWS Directory Service directory for the WorkSpace.

    ", "WorkspaceDirectory$DirectoryId": "

    The directory identifier.

    ", "WorkspaceRequest$DirectoryId": "

    The identifier of the AWS Directory Service directory for the WorkSpace. You can use DescribeWorkspaceDirectories to list the available directories.

    " @@ -483,7 +522,9 @@ "ResourceLimitExceededException$message": "

    The exception error message.

    ", "ResourceNotFoundException$message": "

    The resource could not be found.

    ", "ResourceUnavailableException$message": "

    The exception error message.

    ", - "UnsupportedWorkspaceConfigurationException$message": null + "UnsupportedNetworkConfigurationException$message": null, + "UnsupportedWorkspaceConfigurationException$message": null, + "WorkspacesDefaultRoleNotFoundException$message": null } }, "FailedCreateWorkspaceRequest": { @@ -638,6 +679,7 @@ "base": null, "refs": { "DescribeIpGroupsRequest$MaxResults": "

    The maximum number of items to return.

    ", + "DescribeWorkspaceDirectoriesRequest$Limit": "

    The maximum number of directories to return.

    ", "DescribeWorkspaceImagesRequest$MaxResults": "

    The maximum number of items to return.

    ", "DescribeWorkspacesRequest$Limit": "

    The maximum number of items to return.

    " } @@ -708,6 +750,36 @@ "refs": { } }, + "ModifySelfservicePermissionsRequest": { + "base": null, + "refs": { + } + }, + "ModifySelfservicePermissionsResult": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceAccessPropertiesRequest": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceAccessPropertiesResult": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceCreationPropertiesRequest": { + "base": null, + "refs": { + } + }, + "ModifyWorkspaceCreationPropertiesResult": { + "base": null, + "refs": { + } + }, "ModifyWorkspacePropertiesRequest": { "base": null, "refs": { @@ -835,7 +907,12 @@ "ReconnectEnum": { "base": null, "refs": { - "ClientProperties$ReconnectEnabled": "

    Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.

    " + "ClientProperties$ReconnectEnabled": "

    Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.

    ", + "SelfservicePermissions$RestartWorkspace": "

    Specifies whether users can restart their WorkSpace.

    ", + "SelfservicePermissions$IncreaseVolumeSize": "

    Specifies whether users can increase the volume size of the drives on their WorkSpace.

    ", + "SelfservicePermissions$ChangeComputeType": "

    Specifies whether users can change the compute type (bundle) for their WorkSpace.

    ", + "SelfservicePermissions$SwitchRunningMode": "

    Specifies whether users can switch the running mode of their WorkSpace.

    ", + "SelfservicePermissions$RebuildWorkspace": "

    Specifies whether users can rebuild the operating system of a WorkSpace to its original state.
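    A rough sketch of turning individual self-service capabilities on or off for a directory, assuming the SDK's request/Send pattern; the directory ID is a placeholder and the ReconnectEnum constant names follow the SDK's usual enum-naming convention rather than being confirmed here:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := workspaces.New(cfg)

	_, err = svc.ModifySelfservicePermissionsRequest(&workspaces.ModifySelfservicePermissionsInput{
		ResourceId: aws.String("d-0123456789"), // placeholder directory ID
		SelfservicePermissions: &workspaces.SelfservicePermissions{
			RestartWorkspace:   workspaces.ReconnectEnumEnabled,
			IncreaseVolumeSize: workspaces.ReconnectEnumDisabled,
			ChangeComputeType:  workspaces.ReconnectEnumDisabled,
			SwitchRunningMode:  workspaces.ReconnectEnumEnabled,
			RebuildWorkspace:   workspaces.ReconnectEnumDisabled,
		},
	}).Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("self-service permissions updated")
}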

    " } }, "Region": { @@ -844,6 +921,16 @@ "CopyWorkspaceImageRequest$SourceRegion": "

    The identifier of the source Region.

    " } }, + "RegisterWorkspaceDirectoryRequest": { + "base": null, + "refs": { + } + }, + "RegisterWorkspaceDirectoryResult": { + "base": null, + "refs": { + } + }, "RegistrationCode": { "base": null, "refs": { @@ -934,9 +1021,17 @@ "base": null, "refs": { "DefaultWorkspaceCreationProperties$CustomSecurityGroupId": "

    The identifier of any security groups to apply to WorkSpaces when they are created.

    ", + "WorkspaceCreationProperties$CustomSecurityGroupId": "

    The identifier of your custom security group.

    ", "WorkspaceDirectory$WorkspaceSecurityGroupId": "

    The identifier of the security group that is assigned to new WorkSpaces.

    " } }, + "SelfservicePermissions": { + "base": "

    Describes the self-service permissions for a directory. For more information, see Enable Self-Service WorkSpace Management Capabilities for Your Users.

    ", + "refs": { + "ModifySelfservicePermissionsRequest$SelfservicePermissions": "

    The permissions to enable or disable self-service capabilities.

    ", + "WorkspaceDirectory$SelfservicePermissions": "

    The default self-service permissions for WorkSpaces in the directory.

    " + } + }, "Snapshot": { "base": "

    Describes a snapshot.

    ", "refs": { @@ -1004,6 +1099,7 @@ "SubnetIds": { "base": null, "refs": { + "RegisterWorkspaceDirectoryRequest$SubnetIds": "

    The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an OperationNotSupportedException error.

    ", "WorkspaceDirectory$SubnetIds": "

    The identifiers of the subnets used with the directory.

    " } }, @@ -1033,6 +1129,7 @@ "CreateTagsRequest$Tags": "

    The tags. Each WorkSpaces resource can have a maximum of 50 tags.

    ", "DescribeTagsResult$TagList": "

    The tags.

    ", "ImportWorkspaceImageRequest$Tags": "

    The tags. Each WorkSpaces resource can have a maximum of 50 tags.

    ", + "RegisterWorkspaceDirectoryRequest$Tags": "

    The tags associated with the directory.

    ", "WorkspaceRequest$Tags": "

    The tags for the WorkSpace.

    " } }, @@ -1048,6 +1145,13 @@ "ModifyWorkspaceStateRequest$WorkspaceState": "

    The WorkSpace state.

    " } }, + "Tenancy": { + "base": null, + "refs": { + "RegisterWorkspaceDirectoryRequest$Tenancy": "

    Indicates whether your WorkSpace directory is dedicated or shared. To use Bring Your Own License (BYOL) images, this value must be set to DEDICATED and your AWS account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images.

    ", + "WorkspaceDirectory$Tenancy": "

    Specifies whether the directory is dedicated or shared. To use Bring Your Own License (BYOL), this value must be set to DEDICATED. For more information, see Bring Your Own Windows Desktop Images.

    " + } + }, "TerminateRequest": { "base": "

    Describes the information used to terminate a WorkSpace.

    ", "refs": { @@ -1079,8 +1183,13 @@ "WorkspaceConnectionStatus$LastKnownUserConnectionTimestamp": "

    The timestamp of the last known user connection.

    " } }, + "UnsupportedNetworkConfigurationException": { + "base": "

    The configuration of this network is not supported for this operation, or your network configuration conflicts with the Amazon WorkSpaces management network IP range. For more information, see Configure a VPC for Amazon WorkSpaces.

    ", + "refs": { + } + }, "UnsupportedWorkspaceConfigurationException": { - "base": "

    The configuration of this WorkSpace is not supported for this operation. For more information, see the Amazon WorkSpaces Administration Guide.

    ", + "base": "

    The configuration of this WorkSpace is not supported for this operation. For more information, see Required Configuration and Service Components for WorkSpaces.

    ", "refs": { } }, @@ -1100,7 +1209,7 @@ "DescribeWorkspacesRequest$UserName": "

    The name of the directory user. You must specify this parameter with DirectoryId.

    ", "Workspace$UserName": "

    The user for the WorkSpace.

    ", "WorkspaceDirectory$CustomerUserName": "

    The user name for the service account.

    ", - "WorkspaceRequest$UserName": "

    The username of the user for the WorkSpace. This username must exist in the AWS Directory Service directory for the WorkSpace.

    " + "WorkspaceRequest$UserName": "

    The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.

    " } }, "UserStorage": { @@ -1128,6 +1237,13 @@ "WorkspaceList$member": null } }, + "WorkspaceAccessProperties": { + "base": "

    The device types and operating systems that can be used to access a WorkSpace. For more information, see Amazon WorkSpaces Client Network Requirements.

    ", + "refs": { + "ModifyWorkspaceAccessPropertiesRequest$WorkspaceAccessProperties": "

    The device types and operating systems to enable or disable for access.

    ", + "WorkspaceDirectory$WorkspaceAccessProperties": "

    The devices and operating systems that users can use to access WorkSpaces.

    " + } + }, "WorkspaceBundle": { "base": "

    Describes a WorkSpace bundle.

    ", "refs": { @@ -1146,8 +1262,14 @@ "DescribeWorkspacesConnectionStatusResult$WorkspacesConnectionStatus": "

    Information about the connection status of the WorkSpace.

    " } }, + "WorkspaceCreationProperties": { + "base": "

    Describes the default properties that are used for creating WorkSpaces. For more information, see Update Directory Details for Your WorkSpaces.

    ", + "refs": { + "ModifyWorkspaceCreationPropertiesRequest$WorkspaceCreationProperties": "

    The default properties for creating WorkSpaces.

    " + } + }, "WorkspaceDirectory": { - "base": "

    Describes an AWS Directory Service directory that is used with Amazon WorkSpaces.

    ", + "base": "

    Describes a directory that is used with Amazon WorkSpaces.

    ", "refs": { "DirectoryList$member": null } @@ -1155,7 +1277,7 @@ "WorkspaceDirectoryState": { "base": null, "refs": { - "WorkspaceDirectory$State": "

    The state of the directory's registration with Amazon WorkSpaces

    " + "WorkspaceDirectory$State": "

    The state of the directory's registration with Amazon WorkSpaces.

    " } }, "WorkspaceDirectoryType": { @@ -1256,7 +1378,7 @@ "WorkspaceImageRequiredTenancy": { "base": null, "refs": { - "WorkspaceImage$RequiredTenancy": "

    Specifies whether the image is running on dedicated hardware. When bring your own license (BYOL) is enabled, this value is set to DEDICATED.

    " + "WorkspaceImage$RequiredTenancy": "

    Specifies whether the image is running on dedicated hardware. When Bring Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more information, see Bring Your Own Windows Desktop Images.

    " } }, "WorkspaceImageState": { @@ -1299,6 +1421,11 @@ "Workspace$State": "

    The operational state of the WorkSpace.

    " } }, + "WorkspacesDefaultRoleNotFoundException": { + "base": "

    The workspaces_DefaultRole role could not be found. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role.

    ", + "refs": { + } + }, "WorkspacesIpGroup": { "base": "

    Describes an IP access control group.

    ", "refs": { diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index e6c4fbe2646..47afed048f2 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -1002,6 +1002,21 @@ "us-west-2" : { } } }, + "dataexchange" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "datapipeline" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1563,11 +1578,16 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -2631,6 +2651,10 @@ "qldb" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3306,6 +3330,10 @@ "session.qldb" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -3315,25 +3343,10 @@ "shield" : { "defaults" : { "protocols" : [ "https" ], - "sslCommonName" : "shield.ca-central-1.amazonaws.com" + "sslCommonName" : "shield.us-east-1.amazonaws.com" }, "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "ca-central-1" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "eu-west-3" : { }, - "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { } }, "isRegionalized" : false }, @@ -4000,6 +4013,11 @@ "cn-northwest-1" : { } } }, + "dax" : { + "endpoints" : { + "cn-northwest-1" : { } + } + }, "directconnect" : { "endpoints" : { "cn-north-1" : { }, @@ -4345,6 +4363,11 @@ "hostname" : "cn.transcribe.cn-northwest-1.amazonaws.com.cn" } } + }, + "workspaces" : { + "endpoints" : { + "cn-northwest-1" : { } + } } } }, { diff --git a/service/applicationdiscoveryservice/api_doc.go b/service/applicationdiscoveryservice/api_doc.go index cb7fd8bb7df..3f575c2673c 100644 --- a/service/applicationdiscoveryservice/api_doc.go +++ b/service/applicationdiscoveryservice/api_doc.go @@ -59,6 +59,10 @@ // This guide is intended for use with the AWS Application Discovery Service // User Guide (http://docs.aws.amazon.com/application-discovery/latest/userguide/). // +// Remember that you must set your AWS Migration Hub home region before you +// call any of these APIs, or a HomeRegionNotSetException error will be returned. +// Also, you must make the API calls while in your home region. +// // See https://docs.aws.amazon.com/goto/WebAPI/discovery-2015-11-01 for more information on this service. // // See applicationdiscoveryservice package documentation for more information. 
diff --git a/service/applicationdiscoveryservice/api_errors.go b/service/applicationdiscoveryservice/api_errors.go index e1d94d305c7..739f76b81a4 100644 --- a/service/applicationdiscoveryservice/api_errors.go +++ b/service/applicationdiscoveryservice/api_errors.go @@ -15,6 +15,12 @@ const ( // "ConflictErrorException". ErrCodeConflictErrorException = "ConflictErrorException" + // ErrCodeHomeRegionNotSetException for service response error code + // "HomeRegionNotSetException". + // + // The home region is not set. Set the home region to continue. + ErrCodeHomeRegionNotSetException = "HomeRegionNotSetException" + // ErrCodeInvalidParameterException for service response error code // "InvalidParameterException". // diff --git a/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go b/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go index ab7be5b71c1..8d7645934f3 100644 --- a/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go +++ b/service/applicationdiscoveryservice/api_op_DescribeExportConfigurations.go @@ -12,7 +12,7 @@ import ( type DescribeExportConfigurationsInput struct { _ struct{} `type:"structure"` - // A list of continuous export ids to search for. + // A list of continuous export IDs to search for. ExportIds []string `locationName:"exportIds" type:"list"` // A number between 1 and 100 specifying the maximum number of continuous export diff --git a/service/autoscaling/api_op_CreateAutoScalingGroup.go b/service/autoscaling/api_op_CreateAutoScalingGroup.go index e35ddabf8b4..e70b3375162 100644 --- a/service/autoscaling/api_op_CreateAutoScalingGroup.go +++ b/service/autoscaling/api_op_CreateAutoScalingGroup.go @@ -104,6 +104,11 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. LoadBalancerNames []string `type:"list"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. + MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the group. // // MaxSize is a required field diff --git a/service/autoscaling/api_op_CreateLaunchConfiguration.go b/service/autoscaling/api_op_CreateLaunchConfiguration.go index 20664ae727c..ca07b0b886a 100644 --- a/service/autoscaling/api_op_CreateLaunchConfiguration.go +++ b/service/autoscaling/api_op_CreateLaunchConfiguration.go @@ -146,7 +146,7 @@ type CreateLaunchConfigurationInput struct { // For more information, see Instance Placement Tenancy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-vpc-tenancy) // in the Amazon EC2 Auto Scaling User Guide. // - // Valid values: default | dedicated + // Valid Values: default | dedicated PlacementTenancy *string `min:"1" type:"string"` // The ID of the RAM disk to select. @@ -166,17 +166,13 @@ type CreateLaunchConfigurationInput struct { // The maximum hourly price to be paid for any Spot Instance launched to fulfill // the request. Spot Instances are launched when the price you specify exceeds - // the current Spot market price. For more information, see Launching Spot Instances + // the current Spot price. For more information, see Launching Spot Instances // in Your Auto Scaling Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) // in the Amazon EC2 Auto Scaling User Guide. 
// - // If a Spot price is set, then the Auto Scaling group will only launch instances - // when the Spot price has been met, regardless of the setting in the Auto Scaling - // group's DesiredCapacity. - // - // When you change your Spot price by creating a new launch configuration, running - // instances will continue to run as long as the Spot price for those running - // instances is higher than the current Spot market price. + // When you change your maximum price by creating a new launch configuration, + // running instances will continue to run as long as the maximum price for those + // running instances is higher than the current Spot price. SpotPrice *string `min:"1" type:"string"` // The Base64-encoded user data to make available to the launched EC2 instances. diff --git a/service/autoscaling/api_op_EnterStandby.go b/service/autoscaling/api_op_EnterStandby.go index 26b54b08f82..09a04dd8172 100644 --- a/service/autoscaling/api_op_EnterStandby.go +++ b/service/autoscaling/api_op_EnterStandby.go @@ -72,6 +72,15 @@ const opEnterStandby = "EnterStandby" // // Moves the specified instances into the standby state. // +// If you choose to decrement the desired capacity of the Auto Scaling group, +// the instances can enter standby as long as the desired capacity of the Auto +// Scaling group after the instances are placed into standby is equal to or +// greater than the minimum capacity of the group. +// +// If you choose not to decrement the desired capacity of the Auto Scaling group, +// the Auto Scaling group launches new instances to replace the instances on +// standby. +// // For more information, see Temporarily Removing Instances from Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html) // in the Amazon EC2 Auto Scaling User Guide. diff --git a/service/autoscaling/api_op_ExitStandby.go b/service/autoscaling/api_op_ExitStandby.go index a13b9a1e578..90d96be1344 100644 --- a/service/autoscaling/api_op_ExitStandby.go +++ b/service/autoscaling/api_op_ExitStandby.go @@ -62,6 +62,8 @@ const opExitStandby = "ExitStandby" // // Moves the specified instances out of the standby state. // +// After you put the instances back in service, the desired capacity is incremented. +// // For more information, see Temporarily Removing Instances from Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html) // in the Amazon EC2 Auto Scaling User Guide. diff --git a/service/autoscaling/api_op_UpdateAutoScalingGroup.go b/service/autoscaling/api_op_UpdateAutoScalingGroup.go index 56c255e6a67..8e285dbd12e 100644 --- a/service/autoscaling/api_op_UpdateAutoScalingGroup.go +++ b/service/autoscaling/api_op_UpdateAutoScalingGroup.go @@ -55,11 +55,6 @@ type UpdateAutoScalingGroupInput struct { // The name of the launch configuration. If you specify LaunchConfigurationName // in your update request, you can't specify LaunchTemplate or MixedInstancesPolicy. - // - // To update an Auto Scaling group with a launch configuration with InstanceMonitoring - // set to false, you must first disable the collection of group metrics. Otherwise, - // you get an error. If you have previously enabled the collection of group - // metrics, you can disable it using DisableMetricsCollection. LaunchConfigurationName *string `min:"1" type:"string"` // The launch template and version to use to specify the updates. 
If you specify @@ -70,6 +65,11 @@ type UpdateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling API Reference. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. + MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` @@ -193,8 +193,7 @@ const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" // To update an Auto Scaling group, specify the name of the group and the parameter // that you want to change. Any parameters that you don't specify are not changed // by this update request. The new settings take effect on any scaling activities -// after this call returns. Scaling activities that are currently in progress -// aren't affected. +// after this call returns. // // If you associate a new launch configuration or template with an Auto Scaling // group, all new instances will get the updated configuration. Existing instances diff --git a/service/autoscaling/api_types.go b/service/autoscaling/api_types.go index b18f4126f67..a4c029d391b 100644 --- a/service/autoscaling/api_types.go +++ b/service/autoscaling/api_types.go @@ -155,6 +155,11 @@ type AutoScalingGroup struct { // One or more load balancers associated with the group. LoadBalancerNames []string `type:"list"` + // The maximum amount of time, in seconds, that an instance can be in service. + // + // Valid Range: Minimum value of 604800. + MaxInstanceLifetime *int64 `type:"integer"` + // The maximum size of the group. // // MaxSize is a required field @@ -230,6 +235,9 @@ type AutoScalingInstanceDetails struct { // InstanceId is a required field InstanceId *string `min:"1" type:"string" required:"true"` + // The instance type of the EC2 instance. + InstanceType *string `min:"1" type:"string"` + // The launch configuration used to launch the instance. This value is not available // if you attached the instance to the Auto Scaling group. LaunchConfigurationName *string `min:"1" type:"string"` @@ -247,6 +255,12 @@ type AutoScalingInstanceDetails struct { // // ProtectedFromScaleIn is a required field ProtectedFromScaleIn *bool `type:"boolean" required:"true"` + + // The number of capacity units contributed by the instance based on its instance + // type. + // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -453,7 +467,7 @@ type Ebs struct { // or sc1 for Cold HDD. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // - // Valid values: standard | io1 | gp2 | st1 | sc1 + // Valid Values: standard | io1 | gp2 | st1 | sc1 VolumeType *string `min:"1" type:"string"` } @@ -576,6 +590,9 @@ type Instance struct { // InstanceId is a required field InstanceId *string `min:"1" type:"string" required:"true"` + // The instance type of the EC2 instance. + InstanceType *string `min:"1" type:"string"` + // The launch configuration associated with the instance. LaunchConfigurationName *string `min:"1" type:"string"` @@ -593,6 +610,12 @@ type Instance struct { // // ProtectedFromScaleIn is a required field ProtectedFromScaleIn *bool `type:"boolean" required:"true"` + + // The number of capacity units contributed by the instance based on its instance + // type. 
+ // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -619,6 +642,14 @@ func (s InstanceMonitoring) String() string { // and Spot Instances, the maximum price to pay for Spot Instances, and how // the Auto Scaling group allocates instance types to fulfill On-Demand and // Spot capacity. +// +// When you update SpotAllocationStrategy, SpotInstancePools, or SpotMaxPrice, +// this update action does not deploy any changes across the running Amazon +// EC2 instances in the group. Your existing Spot Instances continue to run +// as long as the maximum price for those instances is higher than the current +// Spot price. When scale out occurs, Amazon EC2 Auto Scaling launches instances +// based on the new settings. When scale in occurs, Amazon EC2 Auto Scaling +// terminates instances according to the group's termination policies. type InstancesDistribution struct { _ struct{} `type:"structure"` @@ -637,16 +668,28 @@ type InstancesDistribution struct { // by On-Demand Instances. This base portion is provisioned first as your group // scales. // - // The default value is 0. If you leave this parameter set to 0, On-Demand Instances - // are launched as a percentage of the Auto Scaling group's desired capacity, - // per the OnDemandPercentageAboveBaseCapacity setting. + // Default if not set is 0. If you leave it set to 0, On-Demand Instances are + // launched as a percentage of the Auto Scaling group's desired capacity, per + // the OnDemandPercentageAboveBaseCapacity setting. + // + // An update to this setting means a gradual replacement of instances to maintain + // the specified number of On-Demand Instances for your base capacity. When + // replacing instances, Amazon EC2 Auto Scaling launches new instances before + // terminating the old ones. OnDemandBaseCapacity *int64 `type:"integer"` // Controls the percentages of On-Demand Instances and Spot Instances for your - // additional capacity beyond OnDemandBaseCapacity. The range is 0–100. + // additional capacity beyond OnDemandBaseCapacity. // - // The default value is 100. If you leave this parameter set to 100, the percentages - // are 100% for On-Demand Instances and 0% for Spot Instances. + // Default if not set is 100. If you leave it set to 100, the percentages are + // 100% for On-Demand Instances and 0% for Spot Instances. + // + // An update to this setting means a gradual replacement of instances to maintain + // the percentage of On-Demand Instances for your additional capacity above + // the base capacity. When replacing instances, Amazon EC2 Auto Scaling launches + // new instances before terminating the old ones. + // + // Valid Range: Minimum value of 0. Maximum value of 100. OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"` // Indicates how to allocate instances across Spot Instance pools. @@ -666,9 +709,11 @@ type InstancesDistribution struct { // The number of Spot Instance pools across which to allocate your Spot Instances. // The Spot pools are determined from the different instance types in the Overrides - // array of LaunchTemplate. The range is 1–20. The default value is 2. + // array of LaunchTemplate. Default if not set is 2. + // + // Used only when the Spot allocation strategy is lowest-price. // - // Valid only when the Spot allocation strategy is lowest-price. + // Valid Range: Minimum value of 1. Maximum value of 20. 
SpotInstancePools *int64 `type:"integer"` // The maximum price per unit hour that you are willing to pay for a Spot Instance. @@ -801,7 +846,7 @@ type LaunchConfiguration struct { // The maximum hourly price to be paid for any Spot Instance launched to fulfill // the request. Spot Instances are launched when the price you specify exceeds - // the current Spot market price. + // the current Spot price. // // For more information, see Launching Spot Instances in Your Auto Scaling Group // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) @@ -825,6 +870,11 @@ func (s LaunchConfiguration) String() string { // The overrides are used to override the instance type specified by the launch // template with multiple instance types that can be used to launch On-Demand // Instances and Spot Instances. +// +// When you update the launch template or overrides, existing Amazon EC2 instances +// continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches +// instances to match the new settings. When scale in occurs, Amazon EC2 Auto +// Scaling terminates instances according to the group's termination policies. type LaunchTemplate struct { _ struct{} `type:"structure"` @@ -832,9 +882,9 @@ type LaunchTemplate struct { // or launch template name in the request. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // Any parameters that you specify override the same parameters in the launch - // template. Currently, the only supported override is instance type. You must - // specify between 2 and 20 overrides. + // An optional setting. Any parameters that you specify override the same parameters + // in the launch template. Currently, the only supported override is instance + // type. You can specify between 1 and 20 instance types. Overrides []LaunchTemplateOverrides `type:"list"` } @@ -875,6 +925,15 @@ type LaunchTemplateOverrides struct { // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) // in the Amazon Elastic Compute Cloud User Guide. InstanceType *string `min:"1" type:"string"` + + // The number of capacity units, which gives the instance type a proportional + // weight to other instance types. For example, larger instance types are generally + // weighted more than smaller instance types. These are the same units that + // you chose to set the desired capacity in terms of instances, or a performance + // attribute such as vCPUs, memory, or I/O. + // + // Valid Range: Minimum value of 1. Maximum value of 999. + WeightedCapacity *string `min:"1" type:"string"` } // String returns the string representation @@ -888,6 +947,9 @@ func (s *LaunchTemplateOverrides) Validate() error { if s.InstanceType != nil && len(*s.InstanceType) < 1 { invalidParams.Add(aws.NewErrParamMinLen("InstanceType", 1)) } + if s.WeightedCapacity != nil && len(*s.WeightedCapacity) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("WeightedCapacity", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -1285,8 +1347,8 @@ type MixedInstancesPolicy struct { // The instances distribution to use. // - // If you leave this parameter unspecified when creating a mixed instances policy, - // the default values are used. + // If you leave this parameter unspecified, the value for each parameter in + // InstancesDistribution uses a default value. InstancesDistribution *InstancesDistribution `type:"structure"` // The launch template and instance types (overrides). 
@@ -1350,13 +1412,7 @@ func (s NotificationConfiguration) String() string { type PredefinedMetricSpecification struct { _ struct{} `type:"structure"` - // The metric type. - // - // PredefinedMetricType is a required field - PredefinedMetricType MetricType `type:"string" required:"true" enum:"true"` - - // Identifies the resource associated with the metric type. The following predefined - // metrics are available: + // The metric type. The following predefined metrics are available: // // * ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling // group. @@ -1370,15 +1426,21 @@ type PredefinedMetricSpecification struct { // * ALBRequestCountPerTarget - Number of requests completed per target in // an Application Load Balancer target group. // - // For predefined metric types ASGAverageCPUUtilization, ASGAverageNetworkIn, - // and ASGAverageNetworkOut, the parameter must not be specified as the resource - // associated with the metric type is the Auto Scaling group. For predefined - // metric type ALBRequestCountPerTarget, the parameter must be specified in - // the format: app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id - // , where app/load-balancer-name/load-balancer-id is the final portion of the - // load balancer ARN, and targetgroup/target-group-name/target-group-id is the - // final portion of the target group ARN. The target group must be attached - // to the Auto Scaling group. + // PredefinedMetricType is a required field + PredefinedMetricType MetricType `type:"string" required:"true" enum:"true"` + + // Identifies the resource associated with the metric type. You can't specify + // a resource label unless the metric type is ALBRequestCountPerTarget and there + // is a target group attached to the Auto Scaling group. + // + // The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id + // , where + // + // * app/load-balancer-name/load-balancer-id is the final portion of the + // load balancer ARN, and + // + // * targetgroup/target-group-name/target-group-id is the final portion of + // the target group ARN. ResourceLabel *string `min:"1" type:"string"` } diff --git a/service/chime/api_doc.go b/service/chime/api_doc.go index 104da361c0a..b41b2ff879d 100644 --- a/service/chime/api_doc.go +++ b/service/chime/api_doc.go @@ -4,10 +4,13 @@ // requests to Amazon Chime. // // The Amazon Chime API (application programming interface) is designed for -// administrators to use to perform key tasks, such as creating and managing -// Amazon Chime accounts and users. This guide provides detailed information +// developers to perform key tasks, such as creating and managing Amazon Chime +// accounts, users, and Voice Connectors. This guide provides detailed information // about the Amazon Chime API, including operations, types, inputs and outputs, -// and error codes. +// and error codes. It also includes some server-side API actions to use with +// the Amazon Chime SDK. For more information about the Amazon Chime SDK, see +// Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. // // You can use an AWS SDK, the AWS Command Line Interface (AWS CLI), or the // REST API to make API calls. We recommend using an AWS SDK or the AWS CLI. @@ -41,9 +44,9 @@ // https://service.chime.aws.amazon.com. // // Administrative permissions are controlled using AWS Identity and Access Management -// (IAM). 
For more information, see Control Access to the Amazon Chime Console -// (https://docs.aws.amazon.com/chime/latest/ag/control-access.html) in the -// Amazon Chime Administration Guide. +// (IAM). For more information, see Identity and Access Management for Amazon +// Chime (https://docs.aws.amazon.com/chime/latest/ag/security-iam.html) in +// the Amazon Chime Administration Guide. // // See https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01 for more information on this service. // diff --git a/service/chime/api_enums.go b/service/chime/api_enums.go index a144226bec5..256a2315fe2 100644 --- a/service/chime/api_enums.go +++ b/service/chime/api_enums.go @@ -140,6 +140,24 @@ func (enum License) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type MemberType string + +// Enum values for MemberType +const ( + MemberTypeUser MemberType = "User" + MemberTypeBot MemberType = "Bot" + MemberTypeWebhook MemberType = "Webhook" +) + +func (enum MemberType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MemberType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type OrderedPhoneNumberStatus string // Enum values for OrderedPhoneNumberStatus @@ -288,6 +306,23 @@ func (enum RegistrationStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type RoomMembershipRole string + +// Enum values for RoomMembershipRole +const ( + RoomMembershipRoleAdministrator RoomMembershipRole = "Administrator" + RoomMembershipRoleMember RoomMembershipRole = "Member" +) + +func (enum RoomMembershipRole) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RoomMembershipRole) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type VoiceConnectorAwsRegion string // Enum values for VoiceConnectorAwsRegion diff --git a/service/chime/api_op_BatchCreateAttendee.go b/service/chime/api_op_BatchCreateAttendee.go new file mode 100644 index 00000000000..5342cb19ddc --- /dev/null +++ b/service/chime/api_op_BatchCreateAttendee.go @@ -0,0 +1,197 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type BatchCreateAttendeeInput struct { + _ struct{} `type:"structure"` + + // The request containing the attendees to create. + // + // Attendees is a required field + Attendees []CreateAttendeeRequestItem `type:"list" required:"true"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCreateAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchCreateAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchCreateAttendeeInput"} + + if s.Attendees == nil { + invalidParams.Add(aws.NewErrParamRequired("Attendees")) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + if s.Attendees != nil { + for i, v := range s.Attendees { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attendees", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type BatchCreateAttendeeOutput struct { + _ struct{} `type:"structure"` + + // The attendee information, including attendees IDs and join tokens. + Attendees []Attendee `type:"list"` + + // If the action fails for one or more of the attendees in the request, a list + // of the attendees is returned, along with error codes and error messages. + Errors []CreateAttendeeError `type:"list"` +} + +// String returns the string representation +func (s BatchCreateAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opBatchCreateAttendee = "BatchCreateAttendee" + +// BatchCreateAttendeeRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates up to 100 new attendees for an active Amazon Chime SDK meeting. For +// more information about the Amazon Chime SDK, see Using the Amazon Chime SDK +// (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) in the Amazon +// Chime Developer Guide. +// +// // Example sending a request using BatchCreateAttendeeRequest. 
+// req := client.BatchCreateAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/BatchCreateAttendee +func (c *Client) BatchCreateAttendeeRequest(input *BatchCreateAttendeeInput) BatchCreateAttendeeRequest { + op := &aws.Operation{ + Name: opBatchCreateAttendee, + HTTPMethod: "POST", + HTTPPath: "/meetings/{meetingId}/attendees?operation=batch-create", + } + + if input == nil { + input = &BatchCreateAttendeeInput{} + } + + req := c.newRequest(op, input, &BatchCreateAttendeeOutput{}) + return BatchCreateAttendeeRequest{Request: req, Input: input, Copy: c.BatchCreateAttendeeRequest} +} + +// BatchCreateAttendeeRequest is the request type for the +// BatchCreateAttendee API operation. +type BatchCreateAttendeeRequest struct { + *aws.Request + Input *BatchCreateAttendeeInput + Copy func(*BatchCreateAttendeeInput) BatchCreateAttendeeRequest +} + +// Send marshals and sends the BatchCreateAttendee API request. +func (r BatchCreateAttendeeRequest) Send(ctx context.Context) (*BatchCreateAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchCreateAttendeeResponse{ + BatchCreateAttendeeOutput: r.Request.Data.(*BatchCreateAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchCreateAttendeeResponse is the response type for the +// BatchCreateAttendee API operation. +type BatchCreateAttendeeResponse struct { + *BatchCreateAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchCreateAttendee request. +func (r *BatchCreateAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_BatchCreateRoomMembership.go b/service/chime/api_op_BatchCreateRoomMembership.go new file mode 100644 index 00000000000..895dfcf8d03 --- /dev/null +++ b/service/chime/api_op_BatchCreateRoomMembership.go @@ -0,0 +1,188 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type BatchCreateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The list of membership items. + // + // MembershipItemList is a required field + MembershipItemList []MembershipItem `type:"list" required:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchCreateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchCreateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchCreateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MembershipItemList == nil { + invalidParams.Add(aws.NewErrParamRequired("MembershipItemList")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MembershipItemList != nil { + v := s.MembershipItemList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "MembershipItemList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type BatchCreateRoomMembershipOutput struct { + _ struct{} `type:"structure"` + + // If the action fails for one or more of the member IDs in the request, a list + // of the member IDs is returned, along with error codes and error messages. + Errors []MemberError `type:"list"` +} + +// String returns the string representation +func (s BatchCreateRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BatchCreateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opBatchCreateRoomMembership = "BatchCreateRoomMembership" + +// BatchCreateRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Adds up to 50 members to a chat room. Members can be either users or bots. +// The member role designates whether the member is a chat room administrator +// or a general chat room member. +// +// // Example sending a request using BatchCreateRoomMembershipRequest. 
+// req := client.BatchCreateRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/BatchCreateRoomMembership +func (c *Client) BatchCreateRoomMembershipRequest(input *BatchCreateRoomMembershipInput) BatchCreateRoomMembershipRequest { + op := &aws.Operation{ + Name: opBatchCreateRoomMembership, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships?operation=batch-create", + } + + if input == nil { + input = &BatchCreateRoomMembershipInput{} + } + + req := c.newRequest(op, input, &BatchCreateRoomMembershipOutput{}) + return BatchCreateRoomMembershipRequest{Request: req, Input: input, Copy: c.BatchCreateRoomMembershipRequest} +} + +// BatchCreateRoomMembershipRequest is the request type for the +// BatchCreateRoomMembership API operation. +type BatchCreateRoomMembershipRequest struct { + *aws.Request + Input *BatchCreateRoomMembershipInput + Copy func(*BatchCreateRoomMembershipInput) BatchCreateRoomMembershipRequest +} + +// Send marshals and sends the BatchCreateRoomMembership API request. +func (r BatchCreateRoomMembershipRequest) Send(ctx context.Context) (*BatchCreateRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchCreateRoomMembershipResponse{ + BatchCreateRoomMembershipOutput: r.Request.Data.(*BatchCreateRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchCreateRoomMembershipResponse is the response type for the +// BatchCreateRoomMembership API operation. +type BatchCreateRoomMembershipResponse struct { + *BatchCreateRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchCreateRoomMembership request. +func (r *BatchCreateRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateAttendee.go b/service/chime/api_op_CreateAttendee.go new file mode 100644 index 00000000000..5e1ce83a469 --- /dev/null +++ b/service/chime/api_op_CreateAttendee.go @@ -0,0 +1,164 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateAttendeeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + // + // ExternalUserId is a required field + ExternalUserId *string `min:"2" type:"string" required:"true" sensitive:"true"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAttendeeInput"} + + if s.ExternalUserId == nil { + invalidParams.Add(aws.NewErrParamRequired("ExternalUserId")) + } + if s.ExternalUserId != nil && len(*s.ExternalUserId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ExternalUserId", 2)) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateAttendeeOutput struct { + _ struct{} `type:"structure"` + + // The attendee information, including attendee ID and join token. + Attendee *Attendee `type:"structure"` +} + +// String returns the string representation +func (s CreateAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendee != nil { + v := s.Attendee + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Attendee", v, metadata) + } + return nil +} + +const opCreateAttendee = "CreateAttendee" + +// CreateAttendeeRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates a new attendee for an active Amazon Chime SDK meeting. For more information +// about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using CreateAttendeeRequest. +// req := client.CreateAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateAttendee +func (c *Client) CreateAttendeeRequest(input *CreateAttendeeInput) CreateAttendeeRequest { + op := &aws.Operation{ + Name: opCreateAttendee, + HTTPMethod: "POST", + HTTPPath: "/meetings/{meetingId}/attendees", + } + + if input == nil { + input = &CreateAttendeeInput{} + } + + req := c.newRequest(op, input, &CreateAttendeeOutput{}) + return CreateAttendeeRequest{Request: req, Input: input, Copy: c.CreateAttendeeRequest} +} + +// CreateAttendeeRequest is the request type for the +// CreateAttendee API operation. +type CreateAttendeeRequest struct { + *aws.Request + Input *CreateAttendeeInput + Copy func(*CreateAttendeeInput) CreateAttendeeRequest +} + +// Send marshals and sends the CreateAttendee API request. 
+func (r CreateAttendeeRequest) Send(ctx context.Context) (*CreateAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateAttendeeResponse{ + CreateAttendeeOutput: r.Request.Data.(*CreateAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateAttendeeResponse is the response type for the +// CreateAttendee API operation. +type CreateAttendeeResponse struct { + *CreateAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateAttendee request. +func (r *CreateAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateMeeting.go b/service/chime/api_op_CreateMeeting.go new file mode 100644 index 00000000000..4bc6344e70d --- /dev/null +++ b/service/chime/api_op_CreateMeeting.go @@ -0,0 +1,192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateMeetingInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the client request. Use a different token for different + // meetings. + // + // ClientRequestToken is a required field + ClientRequestToken *string `min:"2" type:"string" required:"true" idempotencyToken:"true" sensitive:"true"` + + // The Region in which to create the meeting. Available values: us-east-1, us-west-2. + MediaRegion *string `type:"string"` + + // Reserved. + MeetingHostId *string `min:"2" type:"string" sensitive:"true"` + + // The configuration for resource targets to receive notifications when meeting + // and attendee events occur. + NotificationsConfiguration *MeetingNotificationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateMeetingInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMeetingInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateMeetingInput"} + + if s.ClientRequestToken == nil { + invalidParams.Add(aws.NewErrParamRequired("ClientRequestToken")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 2)) + } + if s.MeetingHostId != nil && len(*s.MeetingHostId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("MeetingHostId", 2)) + } + if s.NotificationsConfiguration != nil { + if err := s.NotificationsConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationsConfiguration", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateMeetingInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ClientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MediaRegion != nil { + v := *s.MediaRegion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MediaRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingHostId != nil { + v := *s.MeetingHostId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeetingHostId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NotificationsConfiguration != nil { + v := s.NotificationsConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "NotificationsConfiguration", v, metadata) + } + return nil +} + +type CreateMeetingOutput struct { + _ struct{} `type:"structure"` + + // The meeting information, including the meeting ID and MediaPlacement. + Meeting *Meeting `type:"structure"` +} + +// String returns the string representation +func (s CreateMeetingOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateMeetingOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Meeting != nil { + v := s.Meeting + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Meeting", v, metadata) + } + return nil +} + +const opCreateMeeting = "CreateMeeting" + +// CreateMeetingRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates a new Amazon Chime SDK meeting in the specified media Region with +// no initial attendees. For more information about the Amazon Chime SDK, see +// Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using CreateMeetingRequest. +// req := client.CreateMeetingRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateMeeting +func (c *Client) CreateMeetingRequest(input *CreateMeetingInput) CreateMeetingRequest { + op := &aws.Operation{ + Name: opCreateMeeting, + HTTPMethod: "POST", + HTTPPath: "/meetings", + } + + if input == nil { + input = &CreateMeetingInput{} + } + + req := c.newRequest(op, input, &CreateMeetingOutput{}) + return CreateMeetingRequest{Request: req, Input: input, Copy: c.CreateMeetingRequest} +} + +// CreateMeetingRequest is the request type for the +// CreateMeeting API operation. +type CreateMeetingRequest struct { + *aws.Request + Input *CreateMeetingInput + Copy func(*CreateMeetingInput) CreateMeetingRequest +} + +// Send marshals and sends the CreateMeeting API request. 
+func (r CreateMeetingRequest) Send(ctx context.Context) (*CreateMeetingResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateMeetingResponse{ + CreateMeetingOutput: r.Request.Data.(*CreateMeetingOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateMeetingResponse is the response type for the +// CreateMeeting API operation. +type CreateMeetingResponse struct { + *CreateMeetingOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateMeeting request. +func (r *CreateMeetingResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateRoom.go b/service/chime/api_op_CreateRoom.go new file mode 100644 index 00000000000..98af65aea73 --- /dev/null +++ b/service/chime/api_op_CreateRoom.go @@ -0,0 +1,176 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The idempotency token for the request. + ClientRequestToken *string `min:"2" type:"string" idempotencyToken:"true" sensitive:"true"` + + // The room name. + // + // Name is a required field + Name *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s CreateRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 2)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ClientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. 
+ Room *Room `type:"structure"` +} + +// String returns the string representation +func (s CreateRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opCreateRoom = "CreateRoom" + +// CreateRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Creates a chat room for the specified Amazon Chime account. +// +// // Example sending a request using CreateRoomRequest. +// req := client.CreateRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateRoom +func (c *Client) CreateRoomRequest(input *CreateRoomInput) CreateRoomRequest { + op := &aws.Operation{ + Name: opCreateRoom, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms", + } + + if input == nil { + input = &CreateRoomInput{} + } + + req := c.newRequest(op, input, &CreateRoomOutput{}) + return CreateRoomRequest{Request: req, Input: input, Copy: c.CreateRoomRequest} +} + +// CreateRoomRequest is the request type for the +// CreateRoom API operation. +type CreateRoomRequest struct { + *aws.Request + Input *CreateRoomInput + Copy func(*CreateRoomInput) CreateRoomRequest +} + +// Send marshals and sends the CreateRoom API request. +func (r CreateRoomRequest) Send(ctx context.Context) (*CreateRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateRoomResponse{ + CreateRoomOutput: r.Request.Data.(*CreateRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateRoomResponse is the response type for the +// CreateRoom API operation. +type CreateRoomResponse struct { + *CreateRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateRoom request. +func (r *CreateRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_CreateRoomMembership.go b/service/chime/api_op_CreateRoomMembership.go new file mode 100644 index 00000000000..b6f7d725b99 --- /dev/null +++ b/service/chime/api_op_CreateRoomMembership.go @@ -0,0 +1,184 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The Amazon Chime member ID (user ID or bot ID). + // + // MemberId is a required field + MemberId *string `type:"string" required:"true"` + + // The role of the member. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. 
+ // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MemberId == nil { + invalidParams.Add(aws.NewErrParamRequired("MemberId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateRoomMembershipOutput struct { + _ struct{} `type:"structure"` + + // The room membership details. + RoomMembership *RoomMembership `type:"structure"` +} + +// String returns the string representation +func (s CreateRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RoomMembership != nil { + v := s.RoomMembership + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RoomMembership", v, metadata) + } + return nil +} + +const opCreateRoomMembership = "CreateRoomMembership" + +// CreateRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Adds a member to a chat room. A member can be either a user or a bot. The +// member role designates whether the member is a chat room administrator or +// a general chat room member. +// +// // Example sending a request using CreateRoomMembershipRequest. 
+// req := client.CreateRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/CreateRoomMembership +func (c *Client) CreateRoomMembershipRequest(input *CreateRoomMembershipInput) CreateRoomMembershipRequest { + op := &aws.Operation{ + Name: opCreateRoomMembership, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships", + } + + if input == nil { + input = &CreateRoomMembershipInput{} + } + + req := c.newRequest(op, input, &CreateRoomMembershipOutput{}) + return CreateRoomMembershipRequest{Request: req, Input: input, Copy: c.CreateRoomMembershipRequest} +} + +// CreateRoomMembershipRequest is the request type for the +// CreateRoomMembership API operation. +type CreateRoomMembershipRequest struct { + *aws.Request + Input *CreateRoomMembershipInput + Copy func(*CreateRoomMembershipInput) CreateRoomMembershipRequest +} + +// Send marshals and sends the CreateRoomMembership API request. +func (r CreateRoomMembershipRequest) Send(ctx context.Context) (*CreateRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateRoomMembershipResponse{ + CreateRoomMembershipOutput: r.Request.Data.(*CreateRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateRoomMembershipResponse is the response type for the +// CreateRoomMembership API operation. +type CreateRoomMembershipResponse struct { + *CreateRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateRoomMembership request. +func (r *CreateRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_DeleteAttendee.go b/service/chime/api_op_DeleteAttendee.go new file mode 100644 index 00000000000..66389e7c661 --- /dev/null +++ b/service/chime/api_op_DeleteAttendee.go @@ -0,0 +1,156 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteAttendeeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee ID. + // + // AttendeeId is a required field + AttendeeId *string `location:"uri" locationName:"attendeeId" type:"string" required:"true"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteAttendeeInput"} + + if s.AttendeeId == nil { + invalidParams.Add(aws.NewErrParamRequired("AttendeeId")) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AttendeeId != nil { + v := *s.AttendeeId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "attendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteAttendeeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteAttendee = "DeleteAttendee" + +// DeleteAttendeeRequest returns a request value for making API operation for +// Amazon Chime. +// +// Deletes an attendee from the specified Amazon Chime SDK meeting and deletes +// their JoinToken. Attendees are automatically deleted when a Amazon Chime +// SDK meeting is deleted. For more information about the Amazon Chime SDK, +// see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using DeleteAttendeeRequest. +// req := client.DeleteAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteAttendee +func (c *Client) DeleteAttendeeRequest(input *DeleteAttendeeInput) DeleteAttendeeRequest { + op := &aws.Operation{ + Name: opDeleteAttendee, + HTTPMethod: "DELETE", + HTTPPath: "/meetings/{meetingId}/attendees/{attendeeId}", + } + + if input == nil { + input = &DeleteAttendeeInput{} + } + + req := c.newRequest(op, input, &DeleteAttendeeOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteAttendeeRequest{Request: req, Input: input, Copy: c.DeleteAttendeeRequest} +} + +// DeleteAttendeeRequest is the request type for the +// DeleteAttendee API operation. +type DeleteAttendeeRequest struct { + *aws.Request + Input *DeleteAttendeeInput + Copy func(*DeleteAttendeeInput) DeleteAttendeeRequest +} + +// Send marshals and sends the DeleteAttendee API request. +func (r DeleteAttendeeRequest) Send(ctx context.Context) (*DeleteAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAttendeeResponse{ + DeleteAttendeeOutput: r.Request.Data.(*DeleteAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAttendeeResponse is the response type for the +// DeleteAttendee API operation. +type DeleteAttendeeResponse struct { + *DeleteAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAttendee request. 
+func (r *DeleteAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_DeleteMeeting.go b/service/chime/api_op_DeleteMeeting.go new file mode 100644 index 00000000000..60922f84581 --- /dev/null +++ b/service/chime/api_op_DeleteMeeting.go @@ -0,0 +1,140 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteMeetingInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMeetingInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMeetingInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteMeetingInput"} + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteMeetingInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteMeetingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMeetingOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteMeetingOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteMeeting = "DeleteMeeting" + +// DeleteMeetingRequest returns a request value for making API operation for +// Amazon Chime. +// +// Deletes the specified Amazon Chime SDK meeting. When a meeting is deleted, +// its attendees are also deleted and clients can no longer join it. For more +// information about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using DeleteMeetingRequest. 
+// req := client.DeleteMeetingRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteMeeting +func (c *Client) DeleteMeetingRequest(input *DeleteMeetingInput) DeleteMeetingRequest { + op := &aws.Operation{ + Name: opDeleteMeeting, + HTTPMethod: "DELETE", + HTTPPath: "/meetings/{meetingId}", + } + + if input == nil { + input = &DeleteMeetingInput{} + } + + req := c.newRequest(op, input, &DeleteMeetingOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteMeetingRequest{Request: req, Input: input, Copy: c.DeleteMeetingRequest} +} + +// DeleteMeetingRequest is the request type for the +// DeleteMeeting API operation. +type DeleteMeetingRequest struct { + *aws.Request + Input *DeleteMeetingInput + Copy func(*DeleteMeetingInput) DeleteMeetingRequest +} + +// Send marshals and sends the DeleteMeeting API request. +func (r DeleteMeetingRequest) Send(ctx context.Context) (*DeleteMeetingResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteMeetingResponse{ + DeleteMeetingOutput: r.Request.Data.(*DeleteMeetingOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteMeetingResponse is the response type for the +// DeleteMeeting API operation. +type DeleteMeetingResponse struct { + *DeleteMeetingOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteMeeting request. +func (r *DeleteMeetingResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_DeleteRoom.go b/service/chime/api_op_DeleteRoom.go new file mode 100644 index 00000000000..01a0e08b39b --- /dev/null +++ b/service/chime/api_op_DeleteRoom.go @@ -0,0 +1,152 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The chat room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteRoomOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteRoom = "DeleteRoom" + +// DeleteRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Deletes a chat room. +// +// // Example sending a request using DeleteRoomRequest. +// req := client.DeleteRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteRoom +func (c *Client) DeleteRoomRequest(input *DeleteRoomInput) DeleteRoomRequest { + op := &aws.Operation{ + Name: opDeleteRoom, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}", + } + + if input == nil { + input = &DeleteRoomInput{} + } + + req := c.newRequest(op, input, &DeleteRoomOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRoomRequest{Request: req, Input: input, Copy: c.DeleteRoomRequest} +} + +// DeleteRoomRequest is the request type for the +// DeleteRoom API operation. +type DeleteRoomRequest struct { + *aws.Request + Input *DeleteRoomInput + Copy func(*DeleteRoomInput) DeleteRoomRequest +} + +// Send marshals and sends the DeleteRoom API request. +func (r DeleteRoomRequest) Send(ctx context.Context) (*DeleteRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRoomResponse{ + DeleteRoomOutput: r.Request.Data.(*DeleteRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRoomResponse is the response type for the +// DeleteRoom API operation. +type DeleteRoomResponse struct { + *DeleteRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRoom request. +func (r *DeleteRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_DeleteRoomMembership.go b/service/chime/api_op_DeleteRoomMembership.go new file mode 100644 index 00000000000..fded03a9712 --- /dev/null +++ b/service/chime/api_op_DeleteRoomMembership.go @@ -0,0 +1,167 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The member ID (user ID or bot ID). + // + // MemberId is a required field + MemberId *string `location:"uri" locationName:"memberId" type:"string" required:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MemberId == nil { + invalidParams.Add(aws.NewErrParamRequired("MemberId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "memberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteRoomMembershipOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteRoomMembership = "DeleteRoomMembership" + +// DeleteRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Removes a member from a chat room. +// +// // Example sending a request using DeleteRoomMembershipRequest. 
+// req := client.DeleteRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/DeleteRoomMembership +func (c *Client) DeleteRoomMembershipRequest(input *DeleteRoomMembershipInput) DeleteRoomMembershipRequest { + op := &aws.Operation{ + Name: opDeleteRoomMembership, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + } + + if input == nil { + input = &DeleteRoomMembershipInput{} + } + + req := c.newRequest(op, input, &DeleteRoomMembershipOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRoomMembershipRequest{Request: req, Input: input, Copy: c.DeleteRoomMembershipRequest} +} + +// DeleteRoomMembershipRequest is the request type for the +// DeleteRoomMembership API operation. +type DeleteRoomMembershipRequest struct { + *aws.Request + Input *DeleteRoomMembershipInput + Copy func(*DeleteRoomMembershipInput) DeleteRoomMembershipRequest +} + +// Send marshals and sends the DeleteRoomMembership API request. +func (r DeleteRoomMembershipRequest) Send(ctx context.Context) (*DeleteRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRoomMembershipResponse{ + DeleteRoomMembershipOutput: r.Request.Data.(*DeleteRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRoomMembershipResponse is the response type for the +// DeleteRoomMembership API operation. +type DeleteRoomMembershipResponse struct { + *DeleteRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRoomMembership request. +func (r *DeleteRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetAttendee.go b/service/chime/api_op_GetAttendee.go new file mode 100644 index 00000000000..b6885700d4a --- /dev/null +++ b/service/chime/api_op_GetAttendee.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetAttendeeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee ID. + // + // AttendeeId is a required field + AttendeeId *string `location:"uri" locationName:"attendeeId" type:"string" required:"true"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAttendeeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
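+// GetAttendeeInput requires both MeetingId and AttendeeId to be set.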
+func (s *GetAttendeeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetAttendeeInput"} + + if s.AttendeeId == nil { + invalidParams.Add(aws.NewErrParamRequired("AttendeeId")) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetAttendeeInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AttendeeId != nil { + v := *s.AttendeeId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "attendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetAttendeeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee information. + Attendee *Attendee `type:"structure"` +} + +// String returns the string representation +func (s GetAttendeeOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetAttendeeOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendee != nil { + v := s.Attendee + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Attendee", v, metadata) + } + return nil +} + +const opGetAttendee = "GetAttendee" + +// GetAttendeeRequest returns a request value for making API operation for +// Amazon Chime. +// +// Gets the Amazon Chime SDK attendee details for a specified meeting ID and +// attendee ID. For more information about the Amazon Chime SDK, see Using the +// Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using GetAttendeeRequest. +// req := client.GetAttendeeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetAttendee +func (c *Client) GetAttendeeRequest(input *GetAttendeeInput) GetAttendeeRequest { + op := &aws.Operation{ + Name: opGetAttendee, + HTTPMethod: "GET", + HTTPPath: "/meetings/{meetingId}/attendees/{attendeeId}", + } + + if input == nil { + input = &GetAttendeeInput{} + } + + req := c.newRequest(op, input, &GetAttendeeOutput{}) + return GetAttendeeRequest{Request: req, Input: input, Copy: c.GetAttendeeRequest} +} + +// GetAttendeeRequest is the request type for the +// GetAttendee API operation. +type GetAttendeeRequest struct { + *aws.Request + Input *GetAttendeeInput + Copy func(*GetAttendeeInput) GetAttendeeRequest +} + +// Send marshals and sends the GetAttendee API request. +func (r GetAttendeeRequest) Send(ctx context.Context) (*GetAttendeeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAttendeeResponse{ + GetAttendeeOutput: r.Request.Data.(*GetAttendeeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAttendeeResponse is the response type for the +// GetAttendee API operation. 
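+// It embeds the GetAttendeeOutput and carries the response metadata for the request.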
+type GetAttendeeResponse struct { + *GetAttendeeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAttendee request. +func (r *GetAttendeeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetMeeting.go b/service/chime/api_op_GetMeeting.go new file mode 100644 index 00000000000..eed489aacc1 --- /dev/null +++ b/service/chime/api_op_GetMeeting.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetMeetingInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMeetingInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMeetingInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetMeetingInput"} + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetMeetingInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetMeetingOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK meeting information. + Meeting *Meeting `type:"structure"` +} + +// String returns the string representation +func (s GetMeetingOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetMeetingOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Meeting != nil { + v := s.Meeting + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Meeting", v, metadata) + } + return nil +} + +const opGetMeeting = "GetMeeting" + +// GetMeetingRequest returns a request value for making API operation for +// Amazon Chime. +// +// Gets the Amazon Chime SDK meeting details for the specified meeting ID. For +// more information about the Amazon Chime SDK, see Using the Amazon Chime SDK +// (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) in the Amazon +// Chime Developer Guide. +// +// // Example sending a request using GetMeetingRequest. 
+// req := client.GetMeetingRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetMeeting +func (c *Client) GetMeetingRequest(input *GetMeetingInput) GetMeetingRequest { + op := &aws.Operation{ + Name: opGetMeeting, + HTTPMethod: "GET", + HTTPPath: "/meetings/{meetingId}", + } + + if input == nil { + input = &GetMeetingInput{} + } + + req := c.newRequest(op, input, &GetMeetingOutput{}) + return GetMeetingRequest{Request: req, Input: input, Copy: c.GetMeetingRequest} +} + +// GetMeetingRequest is the request type for the +// GetMeeting API operation. +type GetMeetingRequest struct { + *aws.Request + Input *GetMeetingInput + Copy func(*GetMeetingInput) GetMeetingRequest +} + +// Send marshals and sends the GetMeeting API request. +func (r GetMeetingRequest) Send(ctx context.Context) (*GetMeetingResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetMeetingResponse{ + GetMeetingOutput: r.Request.Data.(*GetMeetingOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetMeetingResponse is the response type for the +// GetMeeting API operation. +type GetMeetingResponse struct { + *GetMeetingOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetMeeting request. +func (r *GetMeetingResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetRoom.go b/service/chime/api_op_GetRoom.go new file mode 100644 index 00000000000..76447cc6f0d --- /dev/null +++ b/service/chime/api_op_GetRoom.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
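+// For GetRoomInput this writes the accountId and roomId values as URI path parameters.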
+func (s GetRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. + Room *Room `type:"structure"` +} + +// String returns the string representation +func (s GetRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opGetRoom = "GetRoom" + +// GetRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Retrieves room details, such as name. +// +// // Example sending a request using GetRoomRequest. +// req := client.GetRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/GetRoom +func (c *Client) GetRoomRequest(input *GetRoomInput) GetRoomRequest { + op := &aws.Operation{ + Name: opGetRoom, + HTTPMethod: "GET", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}", + } + + if input == nil { + input = &GetRoomInput{} + } + + req := c.newRequest(op, input, &GetRoomOutput{}) + return GetRoomRequest{Request: req, Input: input, Copy: c.GetRoomRequest} +} + +// GetRoomRequest is the request type for the +// GetRoom API operation. +type GetRoomRequest struct { + *aws.Request + Input *GetRoomInput + Copy func(*GetRoomInput) GetRoomRequest +} + +// Send marshals and sends the GetRoom API request. +func (r GetRoomRequest) Send(ctx context.Context) (*GetRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetRoomResponse{ + GetRoomOutput: r.Request.Data.(*GetRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetRoomResponse is the response type for the +// GetRoom API operation. +type GetRoomResponse struct { + *GetRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetRoom request. +func (r *GetRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go b/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go index c2a04f1eb24..926f163fb1f 100644 --- a/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go +++ b/service/chime/api_op_GetVoiceConnectorStreamingConfiguration.go @@ -81,8 +81,8 @@ const opGetVoiceConnectorStreamingConfiguration = "GetVoiceConnectorStreamingCon // // Retrieves the streaming configuration details for the specified Amazon Chime // Voice Connector. Shows whether media streaming is enabled for sending to -// Amazon Kinesis, and shows the retention period for the Amazon Kinesis data, -// in hours. 
+// Amazon Kinesis. It also shows the retention period, in hours, for the Amazon +// Kinesis data. // // // Example sending a request using GetVoiceConnectorStreamingConfigurationRequest. // req := client.GetVoiceConnectorStreamingConfigurationRequest(params) diff --git a/service/chime/api_op_InviteUsers.go b/service/chime/api_op_InviteUsers.go index 6e1bd7e7d20..f8a713acdba 100644 --- a/service/chime/api_op_InviteUsers.go +++ b/service/chime/api_op_InviteUsers.go @@ -18,7 +18,7 @@ type InviteUsersInput struct { // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` - // The user email addresses to which to send the invite. + // The user email addresses to which to send the email invitation. // // UserEmailList is a required field UserEmailList []string `type:"list" required:"true"` @@ -75,7 +75,7 @@ func (s InviteUsersInput) MarshalFields(e protocol.FieldEncoder) error { type InviteUsersOutput struct { _ struct{} `type:"structure"` - // The invite details. + // The email invitation details. Invites []Invite `type:"list"` } @@ -106,9 +106,9 @@ const opInviteUsers = "InviteUsers" // InviteUsersRequest returns a request value for making API operation for // Amazon Chime. // -// Sends email invites to as many as 50 users, inviting them to the specified -// Amazon Chime Team account. Only Team account types are currently supported -// for this action. +// Sends email to a maximum of 50 users, inviting them to the specified Amazon +// Chime Team account. Only Team account types are currently supported for this +// action. // // // Example sending a request using InviteUsersRequest. // req := client.InviteUsersRequest(params) diff --git a/service/chime/api_op_ListAttendees.go b/service/chime/api_op_ListAttendees.go new file mode 100644 index 00000000000..2c596a9ee6b --- /dev/null +++ b/service/chime/api_op_ListAttendees.go @@ -0,0 +1,234 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListAttendeesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The Amazon Chime SDK meeting ID. + // + // MeetingId is a required field + MeetingId *string `location:"uri" locationName:"meetingId" type:"string" required:"true"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListAttendeesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttendeesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAttendeesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.MeetingId == nil { + invalidParams.Add(aws.NewErrParamRequired("MeetingId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
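+// For ListAttendeesInput this writes the meetingId value as a URI path parameter and the max-results and next-token values as query string parameters.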
+func (s ListAttendeesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "meetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListAttendeesOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee information. + Attendees []Attendee `type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListAttendeesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListAttendeesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Attendees != nil { + v := s.Attendees + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Attendees", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListAttendees = "ListAttendees" + +// ListAttendeesRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the attendees for the specified Amazon Chime SDK meeting. For more +// information about the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using ListAttendeesRequest. +// req := client.ListAttendeesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListAttendees +func (c *Client) ListAttendeesRequest(input *ListAttendeesInput) ListAttendeesRequest { + op := &aws.Operation{ + Name: opListAttendees, + HTTPMethod: "GET", + HTTPPath: "/meetings/{meetingId}/attendees", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAttendeesInput{} + } + + req := c.newRequest(op, input, &ListAttendeesOutput{}) + return ListAttendeesRequest{Request: req, Input: input, Copy: c.ListAttendeesRequest} +} + +// ListAttendeesRequest is the request type for the +// ListAttendees API operation. +type ListAttendeesRequest struct { + *aws.Request + Input *ListAttendeesInput + Copy func(*ListAttendeesInput) ListAttendeesRequest +} + +// Send marshals and sends the ListAttendees API request. 
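+// On success it returns a ListAttendeesResponse; otherwise it returns the request error.
+// To page through all attendees, wrap the request with NewListAttendeesPaginator instead of calling Send directly.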
+func (r ListAttendeesRequest) Send(ctx context.Context) (*ListAttendeesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAttendeesResponse{ + ListAttendeesOutput: r.Request.Data.(*ListAttendeesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAttendeesRequestPaginator returns a paginator for ListAttendees. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAttendeesRequest(input) +// p := chime.NewListAttendeesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAttendeesPaginator(req ListAttendeesRequest) ListAttendeesPaginator { + return ListAttendeesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAttendeesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAttendeesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAttendeesPaginator struct { + aws.Pager +} + +func (p *ListAttendeesPaginator) CurrentPage() *ListAttendeesOutput { + return p.Pager.CurrentPage().(*ListAttendeesOutput) +} + +// ListAttendeesResponse is the response type for the +// ListAttendees API operation. +type ListAttendeesResponse struct { + *ListAttendeesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAttendees request. +func (r *ListAttendeesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_ListBots.go b/service/chime/api_op_ListBots.go index 9d15a279c1d..2c04f7439b9 100644 --- a/service/chime/api_op_ListBots.go +++ b/service/chime/api_op_ListBots.go @@ -18,7 +18,8 @@ type ListBotsInput struct { // AccountId is a required field AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` - // The maximum number of results to return in a single call. Default is 10. + // The maximum number of results to return in a single call. The default is + // 10. MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` // The token to use to retrieve the next page of results. diff --git a/service/chime/api_op_ListMeetings.go b/service/chime/api_op_ListMeetings.go new file mode 100644 index 00000000000..5f784e5cb4e --- /dev/null +++ b/service/chime/api_op_ListMeetings.go @@ -0,0 +1,219 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListMeetingsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. 
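+	// Pass the NextToken value returned by a previous ListMeetings call to retrieve the next page of results.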
+ NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListMeetingsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMeetingsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListMeetingsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListMeetingsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListMeetingsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK meeting information. + Meetings []Meeting `type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListMeetingsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListMeetingsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Meetings != nil { + v := s.Meetings + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Meetings", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListMeetings = "ListMeetings" + +// ListMeetingsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists up to 100 active Amazon Chime SDK meetings. For more information about +// the Amazon Chime SDK, see Using the Amazon Chime SDK (https://docs.aws.amazon.com/chime/latest/dg/meetings-sdk.html) +// in the Amazon Chime Developer Guide. +// +// // Example sending a request using ListMeetingsRequest. 
+// req := client.ListMeetingsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListMeetings +func (c *Client) ListMeetingsRequest(input *ListMeetingsInput) ListMeetingsRequest { + op := &aws.Operation{ + Name: opListMeetings, + HTTPMethod: "GET", + HTTPPath: "/meetings", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMeetingsInput{} + } + + req := c.newRequest(op, input, &ListMeetingsOutput{}) + return ListMeetingsRequest{Request: req, Input: input, Copy: c.ListMeetingsRequest} +} + +// ListMeetingsRequest is the request type for the +// ListMeetings API operation. +type ListMeetingsRequest struct { + *aws.Request + Input *ListMeetingsInput + Copy func(*ListMeetingsInput) ListMeetingsRequest +} + +// Send marshals and sends the ListMeetings API request. +func (r ListMeetingsRequest) Send(ctx context.Context) (*ListMeetingsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListMeetingsResponse{ + ListMeetingsOutput: r.Request.Data.(*ListMeetingsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListMeetingsRequestPaginator returns a paginator for ListMeetings. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListMeetingsRequest(input) +// p := chime.NewListMeetingsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListMeetingsPaginator(req ListMeetingsRequest) ListMeetingsPaginator { + return ListMeetingsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListMeetingsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListMeetingsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListMeetingsPaginator struct { + aws.Pager +} + +func (p *ListMeetingsPaginator) CurrentPage() *ListMeetingsOutput { + return p.Pager.CurrentPage().(*ListMeetingsOutput) +} + +// ListMeetingsResponse is the response type for the +// ListMeetings API operation. +type ListMeetingsResponse struct { + *ListMeetingsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListMeetings request. +func (r *ListMeetingsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_ListRoomMemberships.go b/service/chime/api_op_ListRoomMemberships.go new file mode 100644 index 00000000000..e8d01e29468 --- /dev/null +++ b/service/chime/api_op_ListRoomMemberships.go @@ -0,0 +1,248 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRoomMembershipsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRoomMembershipsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRoomMembershipsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRoomMembershipsInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRoomMembershipsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRoomMembershipsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` + + // The room membership details. + RoomMemberships []RoomMembership `type:"list"` +} + +// String returns the string representation +func (s ListRoomMembershipsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
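+// For ListRoomMembershipsOutput this encodes the NextToken value and the RoomMemberships list as body fields.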
+func (s ListRoomMembershipsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomMemberships != nil { + v := s.RoomMemberships + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RoomMemberships", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRoomMemberships = "ListRoomMemberships" + +// ListRoomMembershipsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the membership details for the specified room, such as member IDs, +// member email addresses, and member names. +// +// // Example sending a request using ListRoomMembershipsRequest. +// req := client.ListRoomMembershipsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListRoomMemberships +func (c *Client) ListRoomMembershipsRequest(input *ListRoomMembershipsInput) ListRoomMembershipsRequest { + op := &aws.Operation{ + Name: opListRoomMemberships, + HTTPMethod: "GET", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRoomMembershipsInput{} + } + + req := c.newRequest(op, input, &ListRoomMembershipsOutput{}) + return ListRoomMembershipsRequest{Request: req, Input: input, Copy: c.ListRoomMembershipsRequest} +} + +// ListRoomMembershipsRequest is the request type for the +// ListRoomMemberships API operation. +type ListRoomMembershipsRequest struct { + *aws.Request + Input *ListRoomMembershipsInput + Copy func(*ListRoomMembershipsInput) ListRoomMembershipsRequest +} + +// Send marshals and sends the ListRoomMemberships API request. +func (r ListRoomMembershipsRequest) Send(ctx context.Context) (*ListRoomMembershipsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRoomMembershipsResponse{ + ListRoomMembershipsOutput: r.Request.Data.(*ListRoomMembershipsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRoomMembershipsRequestPaginator returns a paginator for ListRoomMemberships. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListRoomMembershipsRequest(input) +// p := chime.NewListRoomMembershipsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRoomMembershipsPaginator(req ListRoomMembershipsRequest) ListRoomMembershipsPaginator { + return ListRoomMembershipsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRoomMembershipsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRoomMembershipsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRoomMembershipsPaginator struct { + aws.Pager +} + +func (p *ListRoomMembershipsPaginator) CurrentPage() *ListRoomMembershipsOutput { + return p.Pager.CurrentPage().(*ListRoomMembershipsOutput) +} + +// ListRoomMembershipsResponse is the response type for the +// ListRoomMemberships API operation. +type ListRoomMembershipsResponse struct { + *ListRoomMembershipsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRoomMemberships request. +func (r *ListRoomMembershipsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_ListRooms.go b/service/chime/api_op_ListRooms.go new file mode 100644 index 00000000000..3f0236e5e9d --- /dev/null +++ b/service/chime/api_op_ListRooms.go @@ -0,0 +1,243 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRoomsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The member ID (user ID or bot ID). + MemberId *string `location:"querystring" locationName:"member-id" type:"string"` + + // The token to use to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListRoomsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRoomsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRoomsInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
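+// For ListRoomsInput this writes the accountId value as a URI path parameter and the max-results, member-id, and next-token values as query string parameters.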
+func (s ListRoomsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "member-id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRoomsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` + + // The room details. + Rooms []Room `type:"list"` +} + +// String returns the string representation +func (s ListRoomsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRoomsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Rooms != nil { + v := s.Rooms + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Rooms", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListRooms = "ListRooms" + +// ListRoomsRequest returns a request value for making API operation for +// Amazon Chime. +// +// Lists the room details for the specified Amazon Chime account. Optionally, +// filter the results by a member ID (user ID or bot ID) to see a list of rooms +// that the member belongs to. +// +// // Example sending a request using ListRoomsRequest. +// req := client.ListRoomsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/ListRooms +func (c *Client) ListRoomsRequest(input *ListRoomsInput) ListRoomsRequest { + op := &aws.Operation{ + Name: opListRooms, + HTTPMethod: "GET", + HTTPPath: "/accounts/{accountId}/rooms", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRoomsInput{} + } + + req := c.newRequest(op, input, &ListRoomsOutput{}) + return ListRoomsRequest{Request: req, Input: input, Copy: c.ListRoomsRequest} +} + +// ListRoomsRequest is the request type for the +// ListRooms API operation. +type ListRoomsRequest struct { + *aws.Request + Input *ListRoomsInput + Copy func(*ListRoomsInput) ListRoomsRequest +} + +// Send marshals and sends the ListRooms API request. 
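+// On success it returns a ListRoomsResponse; otherwise it returns the request error.
+// To page through all rooms, wrap the request with NewListRoomsPaginator instead of calling Send directly.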
+func (r ListRoomsRequest) Send(ctx context.Context) (*ListRoomsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRoomsResponse{ + ListRoomsOutput: r.Request.Data.(*ListRoomsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRoomsRequestPaginator returns a paginator for ListRooms. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRoomsRequest(input) +// p := chime.NewListRoomsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRoomsPaginator(req ListRoomsRequest) ListRoomsPaginator { + return ListRoomsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRoomsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRoomsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRoomsPaginator struct { + aws.Pager +} + +func (p *ListRoomsPaginator) CurrentPage() *ListRoomsOutput { + return p.Pager.CurrentPage().(*ListRoomsOutput) +} + +// ListRoomsResponse is the response type for the +// ListRooms API operation. +type ListRoomsResponse struct { + *ListRoomsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRooms request. +func (r *ListRoomsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go b/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go index 894747f42cd..d274169e272 100644 --- a/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go +++ b/service/chime/api_op_PutVoiceConnectorStreamingConfiguration.go @@ -101,8 +101,8 @@ const opPutVoiceConnectorStreamingConfiguration = "PutVoiceConnectorStreamingCon // // Adds a streaming configuration for the specified Amazon Chime Voice Connector. // The streaming configuration specifies whether media streaming is enabled -// for sending to Amazon Kinesis, and sets the retention period for the Amazon -// Kinesis data, in hours. +// for sending to Amazon Kinesis. It also sets the retention period, in hours, +// for the Amazon Kinesis data. // // // Example sending a request using PutVoiceConnectorStreamingConfigurationRequest. // req := client.PutVoiceConnectorStreamingConfigurationRequest(params) diff --git a/service/chime/api_op_UpdatePhoneNumberSettings.go b/service/chime/api_op_UpdatePhoneNumberSettings.go index 999f21b9ab0..579c24440d6 100644 --- a/service/chime/api_op_UpdatePhoneNumberSettings.go +++ b/service/chime/api_op_UpdatePhoneNumberSettings.go @@ -74,7 +74,7 @@ const opUpdatePhoneNumberSettings = "UpdatePhoneNumberSettings" // Updates the phone number settings for the administrator's AWS account, such // as the default outbound calling name. You can update the default outbound // calling name once every seven days. Outbound calling names can take up to -// 72 hours to be updated. +// 72 hours to update. 
// // // Example sending a request using UpdatePhoneNumberSettingsRequest. // req := client.UpdatePhoneNumberSettingsRequest(params) diff --git a/service/chime/api_op_UpdateRoom.go b/service/chime/api_op_UpdateRoom.go new file mode 100644 index 00000000000..232ecb33e8b --- /dev/null +++ b/service/chime/api_op_UpdateRoom.go @@ -0,0 +1,167 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateRoomInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The room name. + Name *string `type:"string" sensitive:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoomInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoomInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoomInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateRoomInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateRoomOutput struct { + _ struct{} `type:"structure"` + + // The room details. + Room *Room `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoomOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateRoomOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Room != nil { + v := s.Room + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Room", v, metadata) + } + return nil +} + +const opUpdateRoom = "UpdateRoom" + +// UpdateRoomRequest returns a request value for making API operation for +// Amazon Chime. +// +// Updates room details, such as the room name. +// +// // Example sending a request using UpdateRoomRequest. 
+// req := client.UpdateRoomRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/UpdateRoom +func (c *Client) UpdateRoomRequest(input *UpdateRoomInput) UpdateRoomRequest { + op := &aws.Operation{ + Name: opUpdateRoom, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}", + } + + if input == nil { + input = &UpdateRoomInput{} + } + + req := c.newRequest(op, input, &UpdateRoomOutput{}) + return UpdateRoomRequest{Request: req, Input: input, Copy: c.UpdateRoomRequest} +} + +// UpdateRoomRequest is the request type for the +// UpdateRoom API operation. +type UpdateRoomRequest struct { + *aws.Request + Input *UpdateRoomInput + Copy func(*UpdateRoomInput) UpdateRoomRequest +} + +// Send marshals and sends the UpdateRoom API request. +func (r UpdateRoomRequest) Send(ctx context.Context) (*UpdateRoomResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoomResponse{ + UpdateRoomOutput: r.Request.Data.(*UpdateRoomOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoomResponse is the response type for the +// UpdateRoom API operation. +type UpdateRoomResponse struct { + *UpdateRoomOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRoom request. +func (r *UpdateRoomResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_op_UpdateRoomMembership.go b/service/chime/api_op_UpdateRoomMembership.go new file mode 100644 index 00000000000..0c0018b8714 --- /dev/null +++ b/service/chime/api_op_UpdateRoomMembership.go @@ -0,0 +1,184 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package chime + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateRoomMembershipInput struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + // + // AccountId is a required field + AccountId *string `location:"uri" locationName:"accountId" type:"string" required:"true"` + + // The member ID. + // + // MemberId is a required field + MemberId *string `location:"uri" locationName:"memberId" type:"string" required:"true"` + + // The role of the member. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. + // + // RoomId is a required field + RoomId *string `location:"uri" locationName:"roomId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoomMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoomMembershipInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoomMembershipInput"} + + if s.AccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountId")) + } + + if s.MemberId == nil { + invalidParams.Add(aws.NewErrParamRequired("MemberId")) + } + + if s.RoomId == nil { + invalidParams.Add(aws.NewErrParamRequired("RoomId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateRoomMembershipInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "accountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "memberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "roomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateRoomMembershipOutput struct { + _ struct{} `type:"structure"` + + // The room membership details. + RoomMembership *RoomMembership `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoomMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateRoomMembershipOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RoomMembership != nil { + v := s.RoomMembership + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RoomMembership", v, metadata) + } + return nil +} + +const opUpdateRoomMembership = "UpdateRoomMembership" + +// UpdateRoomMembershipRequest returns a request value for making API operation for +// Amazon Chime. +// +// Updates room membership details, such as member role. The member role designates +// whether the member is a chat room administrator or a general chat room member. +// Member role can only be updated for user IDs. +// +// // Example sending a request using UpdateRoomMembershipRequest. +// req := client.UpdateRoomMembershipRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/chime-2018-05-01/UpdateRoomMembership +func (c *Client) UpdateRoomMembershipRequest(input *UpdateRoomMembershipInput) UpdateRoomMembershipRequest { + op := &aws.Operation{ + Name: opUpdateRoomMembership, + HTTPMethod: "POST", + HTTPPath: "/accounts/{accountId}/rooms/{roomId}/memberships/{memberId}", + } + + if input == nil { + input = &UpdateRoomMembershipInput{} + } + + req := c.newRequest(op, input, &UpdateRoomMembershipOutput{}) + return UpdateRoomMembershipRequest{Request: req, Input: input, Copy: c.UpdateRoomMembershipRequest} +} + +// UpdateRoomMembershipRequest is the request type for the +// UpdateRoomMembership API operation. +type UpdateRoomMembershipRequest struct { + *aws.Request + Input *UpdateRoomMembershipInput + Copy func(*UpdateRoomMembershipInput) UpdateRoomMembershipRequest +} + +// Send marshals and sends the UpdateRoomMembership API request. 
+func (r UpdateRoomMembershipRequest) Send(ctx context.Context) (*UpdateRoomMembershipResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoomMembershipResponse{ + UpdateRoomMembershipOutput: r.Request.Data.(*UpdateRoomMembershipOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoomMembershipResponse is the response type for the +// UpdateRoomMembership API operation. +type UpdateRoomMembershipResponse struct { + *UpdateRoomMembershipOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRoomMembership request. +func (r *UpdateRoomMembershipResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/chime/api_types.go b/service/chime/api_types.go index 850d4f97538..4140162ef09 100644 --- a/service/chime/api_types.go +++ b/service/chime/api_types.go @@ -147,6 +147,56 @@ func (s AccountSettings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An Amazon Chime SDK meeting attendee. Includes a unique AttendeeId and JoinToken. +// The JoinToken allows a client to authenticate and join as the specified attendee. +// The JoinToken expires when the meeting ends or when DeleteAttendee is called. +// After that, the attendee is unable to join the meeting. +// +// We recommend securely transferring each JoinToken from your server application +// to the client so that no other client has access to the token except for +// the one authorized to represent the attendee. +type Attendee struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK attendee ID. + AttendeeId *string `type:"string"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + ExternalUserId *string `min:"2" type:"string" sensitive:"true"` + + // The join token used by the Amazon Chime SDK attendee. + JoinToken *string `min:"2" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s Attendee) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Attendee) MarshalFields(e protocol.FieldEncoder) error { + if s.AttendeeId != nil { + v := *s.AttendeeId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AttendeeId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.JoinToken != nil { + v := *s.JoinToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "JoinToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // A resource that allows Enterprise account administrators to configure an // interface to receive events from Amazon Chime. type Bot struct { @@ -272,6 +322,96 @@ func (s BusinessCallingSettings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The list of errors returned when errors are encountered during the BatchCreateAttendee +// and CreateAttendee actions. This includes external user IDs, error codes, +// and error messages. +type CreateAttendeeError struct { + _ struct{} `type:"structure"` + + // The error code. 
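As a usage sketch for the new room operations, the helper below renames a room and then changes a member's role. The account, room, and member IDs are placeholders, svc is an assumed *chime.Client, and the RoomMembershipRoleAdministrator constant is assumed from the generated RoomMembershipRole enum values, which are not shown in this section.

// Rename a chat room, then promote one of its members to administrator.
// Assumes imports "context", "github.com/aws/aws-sdk-go-v2/aws" and
// "github.com/aws/aws-sdk-go-v2/service/chime".
func renameRoomAndPromoteMember(ctx context.Context, svc *chime.Client) error {
	updRoom := svc.UpdateRoomRequest(&chime.UpdateRoomInput{
		AccountId: aws.String("example-account-id"), // placeholder
		RoomId:    aws.String("example-room-id"),    // placeholder
		Name:      aws.String("Team Standup"),
	})
	if _, err := updRoom.Send(ctx); err != nil {
		return err
	}

	updMember := svc.UpdateRoomMembershipRequest(&chime.UpdateRoomMembershipInput{
		AccountId: aws.String("example-account-id"),      // placeholder
		RoomId:    aws.String("example-room-id"),         // placeholder
		MemberId:  aws.String("example-member-id"),       // placeholder
		Role:      chime.RoomMembershipRoleAdministrator, // assumed enum constant
	})
	_, err := updMember.Send(ctx)
	return err
}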
+ ErrorCode *string `type:"string"` + + // The error message. + ErrorMessage *string `type:"string"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + ExternalUserId *string `min:"2" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s CreateAttendeeError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateAttendeeError) MarshalFields(e protocol.FieldEncoder) error { + if s.ErrorCode != nil { + v := *s.ErrorCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ErrorMessage != nil { + v := *s.ErrorMessage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The Amazon Chime SDK attendee fields to create, used with the BatchCreateAttendee +// action. +type CreateAttendeeRequestItem struct { + _ struct{} `type:"structure"` + + // The Amazon Chime SDK external user ID. Links the attendee to an identity + // managed by a builder application. + // + // ExternalUserId is a required field + ExternalUserId *string `min:"2" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s CreateAttendeeRequestItem) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAttendeeRequestItem) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAttendeeRequestItem"} + + if s.ExternalUserId == nil { + invalidParams.Add(aws.NewErrParamRequired("ExternalUserId")) + } + if s.ExternalUserId != nil && len(*s.ExternalUserId) < 2 { + invalidParams.Add(aws.NewErrParamMinLen("ExternalUserId", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateAttendeeRequestItem) MarshalFields(e protocol.FieldEncoder) error { + if s.ExternalUserId != nil { + v := *s.ExternalUserId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ExternalUserId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // The SIP credentials used to authenticate requests to your Amazon Chime Voice // Connector. type Credential struct { @@ -429,6 +569,303 @@ func (s LoggingConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A set of endpoints used by clients to connect to the media service group +// for a Amazon Chime SDK meeting. +type MediaPlacement struct { + _ struct{} `type:"structure"` + + // The audio host URL. + AudioHostUrl *string `type:"string"` + + // The screen data URL. + ScreenDataUrl *string `type:"string"` + + // The screen sharing URL. + ScreenSharingUrl *string `type:"string"` + + // The screen viewing URL. + ScreenViewingUrl *string `type:"string"` + + // The signaling URL. + SignalingUrl *string `type:"string"` + + // The turn control URL. 
+ TurnControlUrl *string `type:"string"` +} + +// String returns the string representation +func (s MediaPlacement) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MediaPlacement) MarshalFields(e protocol.FieldEncoder) error { + if s.AudioHostUrl != nil { + v := *s.AudioHostUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AudioHostUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenDataUrl != nil { + v := *s.ScreenDataUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenDataUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenSharingUrl != nil { + v := *s.ScreenSharingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenSharingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ScreenViewingUrl != nil { + v := *s.ScreenViewingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ScreenViewingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignalingUrl != nil { + v := *s.SignalingUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignalingUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TurnControlUrl != nil { + v := *s.TurnControlUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TurnControlUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A meeting created using the Amazon Chime SDK. +type Meeting struct { + _ struct{} `type:"structure"` + + // The media placement for the meeting. + MediaPlacement *MediaPlacement `type:"structure"` + + // The Region in which to create the meeting. Available values: us-east-1, us-west-2. + MediaRegion *string `type:"string"` + + // The Amazon Chime SDK meeting ID. + MeetingId *string `type:"string"` +} + +// String returns the string representation +func (s Meeting) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Meeting) MarshalFields(e protocol.FieldEncoder) error { + if s.MediaPlacement != nil { + v := s.MediaPlacement + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MediaPlacement", v, metadata) + } + if s.MediaRegion != nil { + v := *s.MediaRegion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MediaRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MeetingId != nil { + v := *s.MeetingId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MeetingId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The configuration for resource targets to receive notifications when Amazon +// Chime SDK meeting and attendee events occur. +type MeetingNotificationConfiguration struct { + _ struct{} `type:"structure"` + + // The SNS topic ARN. + SnsTopicArn *string `min:"1" type:"string" sensitive:"true"` + + // The SQS queue ARN. + SqsQueueArn *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s MeetingNotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MeetingNotificationConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MeetingNotificationConfiguration"} + if s.SnsTopicArn != nil && len(*s.SnsTopicArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SnsTopicArn", 1)) + } + if s.SqsQueueArn != nil && len(*s.SqsQueueArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SqsQueueArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MeetingNotificationConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.SnsTopicArn != nil { + v := *s.SnsTopicArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SnsTopicArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SqsQueueArn != nil { + v := *s.SqsQueueArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SqsQueueArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The member details, such as email address, name, member ID, and member type. +type Member struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + AccountId *string `type:"string"` + + // The member email address. + Email *string `type:"string" sensitive:"true"` + + // The member name. + FullName *string `type:"string" sensitive:"true"` + + // The member ID (user ID or bot ID). + MemberId *string `type:"string"` + + // The member type. + MemberType MemberType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s Member) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Member) MarshalFields(e protocol.FieldEncoder) error { + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Email != nil { + v := *s.Email + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Email", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.FullName != nil { + v := *s.FullName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FullName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.MemberType) > 0 { + v := s.MemberType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The list of errors returned when a member action results in an error. +type MemberError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode ErrorCode `type:"string" enum:"true"` + + // The error message. + ErrorMessage *string `type:"string"` + + // The member ID. + MemberId *string `type:"string"` +} + +// String returns the string representation +func (s MemberError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s MemberError) MarshalFields(e protocol.FieldEncoder) error { + if len(s.ErrorCode) > 0 { + v := s.ErrorCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorCode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ErrorMessage != nil { + v := *s.ErrorMessage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ErrorMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Membership details, such as member ID and member role. +type MembershipItem struct { + _ struct{} `type:"structure"` + + // The member ID. + MemberId *string `type:"string"` + + // The member role. + Role RoomMembershipRole `type:"string" enum:"true"` +} + +// String returns the string representation +func (s MembershipItem) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MembershipItem) MarshalFields(e protocol.FieldEncoder) error { + if s.MemberId != nil { + v := *s.MemberId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + // A phone number for which an order has been placed. type OrderedPhoneNumber struct { _ struct{} `type:"structure"` @@ -973,13 +1410,145 @@ func (s PhoneNumberOrder) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The Amazon Chime chat room details. +type Room struct { + _ struct{} `type:"structure"` + + // The Amazon Chime account ID. + AccountId *string `type:"string"` + + // The identifier of the room creator. + CreatedBy *string `type:"string"` + + // The room creation timestamp, in ISO 8601 format. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The room name. + Name *string `type:"string" sensitive:"true"` + + // The room ID. + RoomId *string `type:"string"` + + // The room update timestamp, in ISO 8601 format. + UpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Room) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Room) MarshalFields(e protocol.FieldEncoder) error { + if s.AccountId != nil { + v := *s.AccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedBy != nil { + v := *s.CreatedBy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedBy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTimestamp != nil { + v := *s.CreatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RoomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedTimestamp != nil { + v := *s.UpdatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +// The room membership details. +type RoomMembership struct { + _ struct{} `type:"structure"` + + // The identifier of the user that invited the room member. + InvitedBy *string `type:"string"` + + // The member details, such as email address, name, member ID, and member type. + Member *Member `type:"structure"` + + // The membership role. + Role RoomMembershipRole `type:"string" enum:"true"` + + // The room ID. + RoomId *string `type:"string"` + + // The room membership update timestamp, in ISO 8601 format. + UpdatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s RoomMembership) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RoomMembership) MarshalFields(e protocol.FieldEncoder) error { + if s.InvitedBy != nil { + v := *s.InvitedBy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InvitedBy", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Member != nil { + v := s.Member + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Member", v, metadata) + } + if len(s.Role) > 0 { + v := s.Role + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Role", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.RoomId != nil { + v := *s.RoomId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RoomId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedTimestamp != nil { + v := *s.UpdatedTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedTimestamp", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + // The streaming configuration associated with an Amazon Chime Voice Connector. // Specifies whether media streaming is enabled for sending to Amazon Kinesis, // and shows the retention period for the Amazon Kinesis data, in hours. type StreamingConfiguration struct { _ struct{} `type:"structure"` - // The retention period for the Amazon Kinesis data, in hours. 
+ // The retention period, in hours, for the Amazon Kinesis data. // // DataRetentionInHours is a required field DataRetentionInHours *int64 `type:"integer" required:"true"` diff --git a/service/chime/chimeiface/interface.go b/service/chime/chimeiface/interface.go index 8404cb474b6..0b6f10ffcd0 100644 --- a/service/chime/chimeiface/interface.go +++ b/service/chime/chimeiface/interface.go @@ -67,6 +67,10 @@ type ClientAPI interface { AssociatePhoneNumbersWithVoiceConnectorGroupRequest(*chime.AssociatePhoneNumbersWithVoiceConnectorGroupInput) chime.AssociatePhoneNumbersWithVoiceConnectorGroupRequest + BatchCreateAttendeeRequest(*chime.BatchCreateAttendeeInput) chime.BatchCreateAttendeeRequest + + BatchCreateRoomMembershipRequest(*chime.BatchCreateRoomMembershipInput) chime.BatchCreateRoomMembershipRequest + BatchDeletePhoneNumberRequest(*chime.BatchDeletePhoneNumberInput) chime.BatchDeletePhoneNumberRequest BatchSuspendUserRequest(*chime.BatchSuspendUserInput) chime.BatchSuspendUserRequest @@ -79,20 +83,36 @@ type ClientAPI interface { CreateAccountRequest(*chime.CreateAccountInput) chime.CreateAccountRequest + CreateAttendeeRequest(*chime.CreateAttendeeInput) chime.CreateAttendeeRequest + CreateBotRequest(*chime.CreateBotInput) chime.CreateBotRequest + CreateMeetingRequest(*chime.CreateMeetingInput) chime.CreateMeetingRequest + CreatePhoneNumberOrderRequest(*chime.CreatePhoneNumberOrderInput) chime.CreatePhoneNumberOrderRequest + CreateRoomRequest(*chime.CreateRoomInput) chime.CreateRoomRequest + + CreateRoomMembershipRequest(*chime.CreateRoomMembershipInput) chime.CreateRoomMembershipRequest + CreateVoiceConnectorRequest(*chime.CreateVoiceConnectorInput) chime.CreateVoiceConnectorRequest CreateVoiceConnectorGroupRequest(*chime.CreateVoiceConnectorGroupInput) chime.CreateVoiceConnectorGroupRequest DeleteAccountRequest(*chime.DeleteAccountInput) chime.DeleteAccountRequest + DeleteAttendeeRequest(*chime.DeleteAttendeeInput) chime.DeleteAttendeeRequest + DeleteEventsConfigurationRequest(*chime.DeleteEventsConfigurationInput) chime.DeleteEventsConfigurationRequest + DeleteMeetingRequest(*chime.DeleteMeetingInput) chime.DeleteMeetingRequest + DeletePhoneNumberRequest(*chime.DeletePhoneNumberInput) chime.DeletePhoneNumberRequest + DeleteRoomRequest(*chime.DeleteRoomInput) chime.DeleteRoomRequest + + DeleteRoomMembershipRequest(*chime.DeleteRoomMembershipInput) chime.DeleteRoomMembershipRequest + DeleteVoiceConnectorRequest(*chime.DeleteVoiceConnectorInput) chime.DeleteVoiceConnectorRequest DeleteVoiceConnectorGroupRequest(*chime.DeleteVoiceConnectorGroupInput) chime.DeleteVoiceConnectorGroupRequest @@ -115,18 +135,24 @@ type ClientAPI interface { GetAccountSettingsRequest(*chime.GetAccountSettingsInput) chime.GetAccountSettingsRequest + GetAttendeeRequest(*chime.GetAttendeeInput) chime.GetAttendeeRequest + GetBotRequest(*chime.GetBotInput) chime.GetBotRequest GetEventsConfigurationRequest(*chime.GetEventsConfigurationInput) chime.GetEventsConfigurationRequest GetGlobalSettingsRequest(*chime.GetGlobalSettingsInput) chime.GetGlobalSettingsRequest + GetMeetingRequest(*chime.GetMeetingInput) chime.GetMeetingRequest + GetPhoneNumberRequest(*chime.GetPhoneNumberInput) chime.GetPhoneNumberRequest GetPhoneNumberOrderRequest(*chime.GetPhoneNumberOrderInput) chime.GetPhoneNumberOrderRequest GetPhoneNumberSettingsRequest(*chime.GetPhoneNumberSettingsInput) chime.GetPhoneNumberSettingsRequest + GetRoomRequest(*chime.GetRoomInput) chime.GetRoomRequest + GetUserRequest(*chime.GetUserInput) chime.GetUserRequest 
GetUserSettingsRequest(*chime.GetUserSettingsInput) chime.GetUserSettingsRequest @@ -149,12 +175,20 @@ type ClientAPI interface { ListAccountsRequest(*chime.ListAccountsInput) chime.ListAccountsRequest + ListAttendeesRequest(*chime.ListAttendeesInput) chime.ListAttendeesRequest + ListBotsRequest(*chime.ListBotsInput) chime.ListBotsRequest + ListMeetingsRequest(*chime.ListMeetingsInput) chime.ListMeetingsRequest + ListPhoneNumberOrdersRequest(*chime.ListPhoneNumberOrdersInput) chime.ListPhoneNumberOrdersRequest ListPhoneNumbersRequest(*chime.ListPhoneNumbersInput) chime.ListPhoneNumbersRequest + ListRoomMembershipsRequest(*chime.ListRoomMembershipsInput) chime.ListRoomMembershipsRequest + + ListRoomsRequest(*chime.ListRoomsInput) chime.ListRoomsRequest + ListUsersRequest(*chime.ListUsersInput) chime.ListUsersRequest ListVoiceConnectorGroupsRequest(*chime.ListVoiceConnectorGroupsInput) chime.ListVoiceConnectorGroupsRequest @@ -197,6 +231,10 @@ type ClientAPI interface { UpdatePhoneNumberSettingsRequest(*chime.UpdatePhoneNumberSettingsInput) chime.UpdatePhoneNumberSettingsRequest + UpdateRoomRequest(*chime.UpdateRoomInput) chime.UpdateRoomRequest + + UpdateRoomMembershipRequest(*chime.UpdateRoomMembershipInput) chime.UpdateRoomMembershipRequest + UpdateUserRequest(*chime.UpdateUserInput) chime.UpdateUserRequest UpdateUserSettingsRequest(*chime.UpdateUserSettingsInput) chime.UpdateUserSettingsRequest diff --git a/service/cloudformation/api_enums.go b/service/cloudformation/api_enums.go index 7acae068116..642897d4d40 100644 --- a/service/cloudformation/api_enums.go +++ b/service/cloudformation/api_enums.go @@ -131,6 +131,23 @@ func (enum ChangeType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type DeprecatedStatus string + +// Enum values for DeprecatedStatus +const ( + DeprecatedStatusLive DeprecatedStatus = "LIVE" + DeprecatedStatusDeprecated DeprecatedStatus = "DEPRECATED" +) + +func (enum DeprecatedStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeprecatedStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type DifferenceType string // Enum values for DifferenceType @@ -187,6 +204,35 @@ func (enum ExecutionStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type HandlerErrorCode string + +// Enum values for HandlerErrorCode +const ( + HandlerErrorCodeNotUpdatable HandlerErrorCode = "NotUpdatable" + HandlerErrorCodeInvalidRequest HandlerErrorCode = "InvalidRequest" + HandlerErrorCodeAccessDenied HandlerErrorCode = "AccessDenied" + HandlerErrorCodeInvalidCredentials HandlerErrorCode = "InvalidCredentials" + HandlerErrorCodeAlreadyExists HandlerErrorCode = "AlreadyExists" + HandlerErrorCodeNotFound HandlerErrorCode = "NotFound" + HandlerErrorCodeResourceConflict HandlerErrorCode = "ResourceConflict" + HandlerErrorCodeThrottling HandlerErrorCode = "Throttling" + HandlerErrorCodeServiceLimitExceeded HandlerErrorCode = "ServiceLimitExceeded" + HandlerErrorCodeNotStabilized HandlerErrorCode = "NotStabilized" + HandlerErrorCodeGeneralServiceException HandlerErrorCode = "GeneralServiceException" + HandlerErrorCodeServiceInternalError HandlerErrorCode = "ServiceInternalError" + HandlerErrorCodeNetworkFailure HandlerErrorCode = "NetworkFailure" + HandlerErrorCodeInternalFailure HandlerErrorCode = "InternalFailure" +) + +func (enum HandlerErrorCode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum HandlerErrorCode) 
MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type OnFailure string // Enum values for OnFailure @@ -205,6 +251,77 @@ func (enum OnFailure) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type OperationStatus string + +// Enum values for OperationStatus +const ( + OperationStatusPending OperationStatus = "PENDING" + OperationStatusInProgress OperationStatus = "IN_PROGRESS" + OperationStatusSuccess OperationStatus = "SUCCESS" + OperationStatusFailed OperationStatus = "FAILED" +) + +func (enum OperationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OperationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ProvisioningType string + +// Enum values for ProvisioningType +const ( + ProvisioningTypeNonProvisionable ProvisioningType = "NON_PROVISIONABLE" + ProvisioningTypeImmutable ProvisioningType = "IMMUTABLE" + ProvisioningTypeFullyMutable ProvisioningType = "FULLY_MUTABLE" +) + +func (enum ProvisioningType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ProvisioningType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type RegistrationStatus string + +// Enum values for RegistrationStatus +const ( + RegistrationStatusComplete RegistrationStatus = "COMPLETE" + RegistrationStatusInProgress RegistrationStatus = "IN_PROGRESS" + RegistrationStatusFailed RegistrationStatus = "FAILED" +) + +func (enum RegistrationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RegistrationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type RegistryType string + +// Enum values for RegistryType +const ( + RegistryTypeResource RegistryType = "RESOURCE" +) + +func (enum RegistryType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RegistryType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type Replacement string // Enum values for Replacement @@ -384,13 +501,52 @@ func (enum StackResourceDriftStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type StackSetDriftDetectionStatus string + +// Enum values for StackSetDriftDetectionStatus +const ( + StackSetDriftDetectionStatusCompleted StackSetDriftDetectionStatus = "COMPLETED" + StackSetDriftDetectionStatusFailed StackSetDriftDetectionStatus = "FAILED" + StackSetDriftDetectionStatusPartialSuccess StackSetDriftDetectionStatus = "PARTIAL_SUCCESS" + StackSetDriftDetectionStatusInProgress StackSetDriftDetectionStatus = "IN_PROGRESS" + StackSetDriftDetectionStatusStopped StackSetDriftDetectionStatus = "STOPPED" +) + +func (enum StackSetDriftDetectionStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum StackSetDriftDetectionStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type StackSetDriftStatus string + +// Enum values for StackSetDriftStatus +const ( + StackSetDriftStatusDrifted StackSetDriftStatus = "DRIFTED" + StackSetDriftStatusInSync StackSetDriftStatus = "IN_SYNC" + StackSetDriftStatusNotChecked StackSetDriftStatus = "NOT_CHECKED" +) + +func (enum StackSetDriftStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum StackSetDriftStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return 
append(b, enum...), nil +} + type StackSetOperationAction string // Enum values for StackSetOperationAction const ( - StackSetOperationActionCreate StackSetOperationAction = "CREATE" - StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" - StackSetOperationActionDelete StackSetOperationAction = "DELETE" + StackSetOperationActionCreate StackSetOperationAction = "CREATE" + StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" + StackSetOperationActionDelete StackSetOperationAction = "DELETE" + StackSetOperationActionDetectDrift StackSetOperationAction = "DETECT_DRIFT" ) func (enum StackSetOperationAction) MarshalValue() (string, error) { @@ -512,3 +668,20 @@ func (enum TemplateStage) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] return append(b, enum...), nil } + +type Visibility string + +// Enum values for Visibility +const ( + VisibilityPublic Visibility = "PUBLIC" + VisibilityPrivate Visibility = "PRIVATE" +) + +func (enum Visibility) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Visibility) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/cloudformation/api_errors.go b/service/cloudformation/api_errors.go index 9ced8d7bd6c..f2312e9fb76 100644 --- a/service/cloudformation/api_errors.go +++ b/service/cloudformation/api_errors.go @@ -10,6 +10,12 @@ const ( // The resource with the name requested already exists. ErrCodeAlreadyExistsException = "AlreadyExistsException" + // ErrCodeCFNRegistryException for service response error code + // "CFNRegistryException". + // + // An error occurred during a CloudFormation registry operation. + ErrCodeCFNRegistryException = "CFNRegistryException" + // ErrCodeChangeSetNotFoundException for service response error code // "ChangeSetNotFound". // @@ -44,6 +50,13 @@ const ( // The specified operation isn't valid. ErrCodeInvalidOperationException = "InvalidOperationException" + // ErrCodeInvalidStateTransitionException for service response error code + // "InvalidStateTransition". + // + // Error reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // CloudFormation does not return this error to users. + ErrCodeInvalidStateTransitionException = "InvalidStateTransition" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // @@ -78,6 +91,13 @@ const ( // The specified ID refers to an operation that doesn't exist. ErrCodeOperationNotFoundException = "OperationNotFoundException" + // ErrCodeOperationStatusCheckFailedException for service response error code + // "ConditionalCheckFailed". + // + // Error reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // CloudFormation does not return this error to users. + ErrCodeOperationStatusCheckFailedException = "ConditionalCheckFailed" + // ErrCodeStackInstanceNotFoundException for service response error code // "StackInstanceNotFoundException". // @@ -110,4 +130,10 @@ const ( // // A client request token already exists. ErrCodeTokenAlreadyExistsException = "TokenAlreadyExistsException" + + // ErrCodeTypeNotFoundException for service response error code + // "TypeNotFoundException". + // + // The specified type does not exist in the CloudFormation registry. 
+ ErrCodeTypeNotFoundException = "TypeNotFoundException" ) diff --git a/service/cloudformation/api_integ_test.go b/service/cloudformation/api_integ_test.go index 1969917723b..da02286ceea 100644 --- a/service/cloudformation/api_integ_test.go +++ b/service/cloudformation/api_integ_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" "github.com/aws/aws-sdk-go-v2/internal/awstesting/integration" "github.com/aws/aws-sdk-go-v2/service/cloudformation" ) @@ -27,7 +28,7 @@ func TestInteg_00_ListStacks(t *testing.T) { params := &cloudformation.ListStacksInput{} req := svc.ListStacksRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err != nil { t.Errorf("expect no error, got %v", err) @@ -45,7 +46,7 @@ func TestInteg_01_CreateStack(t *testing.T) { } req := svc.CreateStackRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err == nil { t.Fatalf("expect request to fail") diff --git a/service/cloudformation/api_op_DeregisterType.go b/service/cloudformation/api_op_DeregisterType.go new file mode 100644 index 00000000000..3eb05290e9b --- /dev/null +++ b/service/cloudformation/api_op_DeregisterType.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeregisterTypeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeregisterTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeregisterTypeInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterTypeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeregisterType = "DeregisterType" + +// DeregisterTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Removes a type or type version from active use in the CloudFormation registry. +// If a type or type version is deregistered, it cannot be used in CloudFormation +// operations. +// +// To deregister a type, you must individually deregister all registered versions +// of that type. 
If a type has only a single registered version, deregistering +// that version results in the type itself being deregistered. +// +// You cannot deregister the default version of a type, unless it is the only +// registered version of that type, in which case the type itself is deregistered +// as well. +// +// // Example sending a request using DeregisterTypeRequest. +// req := client.DeregisterTypeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DeregisterType +func (c *Client) DeregisterTypeRequest(input *DeregisterTypeInput) DeregisterTypeRequest { + op := &aws.Operation{ + Name: opDeregisterType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterTypeInput{} + } + + req := c.newRequest(op, input, &DeregisterTypeOutput{}) + return DeregisterTypeRequest{Request: req, Input: input, Copy: c.DeregisterTypeRequest} +} + +// DeregisterTypeRequest is the request type for the +// DeregisterType API operation. +type DeregisterTypeRequest struct { + *aws.Request + Input *DeregisterTypeInput + Copy func(*DeregisterTypeInput) DeregisterTypeRequest +} + +// Send marshals and sends the DeregisterType API request. +func (r DeregisterTypeRequest) Send(ctx context.Context) (*DeregisterTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeregisterTypeResponse{ + DeregisterTypeOutput: r.Request.Data.(*DeregisterTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeregisterTypeResponse is the response type for the +// DeregisterType API operation. +type DeregisterTypeResponse struct { + *DeregisterTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeregisterType request. +func (r *DeregisterTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DescribeType.go b/service/cloudformation/api_op_DescribeType.go new file mode 100644 index 00000000000..76f4a4a5532 --- /dev/null +++ b/service/cloudformation/api_op_DescribeType.go @@ -0,0 +1,227 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeTypeInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + // + // If you specify a VersionId, DescribeType returns information about that specific + // type version. Otherwise, it returns information about the default type version. 
+ VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTypeInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTypeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type. + Arn *string `type:"string"` + + // The ID of the default version of the type. The default version is used when + // the type version is not specified. + // + // To set the default version of a type, use SetTypeDefaultVersion . + DefaultVersionId *string `min:"1" type:"string"` + + // The deprecation status of the type. + // + // Valid values include: + // + // * LIVE: The type is registered and can be used in CloudFormation operations, + // dependent on its provisioning behavior and visibility scope. + // + // * DEPRECATED: The type has been deregistered and can no longer be used + // in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The description of the registered type. + Description *string `min:"1" type:"string"` + + // The URL of a page providing detailed documentation for this type. + DocumentationUrl *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM execution role used to register + // the type. If your resource type calls AWS APIs in any of its handlers, you + // must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) + // that includes the necessary permissions to call those AWS APIs, and provision + // that execution role in your account. CloudFormation then assumes that execution + // role to provide your resource type with the appropriate credentials. + ExecutionRoleArn *string `min:"1" type:"string"` + + // When the specified type version was registered. + LastUpdated *time.Time `type:"timestamp"` + + // Contains logging configuration information for a type. + LoggingConfig *LoggingConfig `type:"structure"` + + // The provisioning behavior of the type. AWS CloudFormation determines the + // provisioning type during registration, based on the types of handlers in + // the schema handler package submitted. + // + // Valid values include: + // + // * FULLY_MUTABLE: The type includes an update handler to process updates + // to the type during stack update operations. + // + // * IMMUTABLE: The type does not include an update handler, so the type + // cannot be updated and must instead be replaced during stack update operations. + // + // * NON_PROVISIONABLE: The type does not include all of the following handlers, + // and therefore cannot actually be provisioned. create read delete + ProvisioningType ProvisioningType `type:"string" enum:"true"` + + // The schema that defines the type. + // + // For more information on type schemas, see Resource Provider Schema (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-schema.html) + // in the CloudFormation CLI User Guide. + Schema *string `min:"1" type:"string"` + + // The URL of the source code for the type. 
+ SourceUrl *string `type:"string"` + + // When the specified type version was registered. + TimeCreated *time.Time `type:"timestamp"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the registered type. + TypeName *string `min:"10" type:"string"` + + // The scope at which the type is visible and usable in CloudFormation operations. + // + // Valid values include: + // + // * PRIVATE: The type is only visible and usable within the account in which + // it is registered. Currently, AWS CloudFormation marks any types you register + // as PRIVATE. + // + // * PUBLIC: The type is publically visible and usable within any Amazon + // account. + Visibility Visibility `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DescribeTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeType = "DescribeType" + +// DescribeTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns detailed information about a type that has been registered. +// +// If you specify a VersionId, DescribeType returns information about that specific +// type version. Otherwise, it returns information about the default type version. +// +// // Example sending a request using DescribeTypeRequest. +// req := client.DescribeTypeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeType +func (c *Client) DescribeTypeRequest(input *DescribeTypeInput) DescribeTypeRequest { + op := &aws.Operation{ + Name: opDescribeType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTypeInput{} + } + + req := c.newRequest(op, input, &DescribeTypeOutput{}) + return DescribeTypeRequest{Request: req, Input: input, Copy: c.DescribeTypeRequest} +} + +// DescribeTypeRequest is the request type for the +// DescribeType API operation. +type DescribeTypeRequest struct { + *aws.Request + Input *DescribeTypeInput + Copy func(*DescribeTypeInput) DescribeTypeRequest +} + +// Send marshals and sends the DescribeType API request. +func (r DescribeTypeRequest) Send(ctx context.Context) (*DescribeTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTypeResponse{ + DescribeTypeOutput: r.Request.Data.(*DescribeTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTypeResponse is the response type for the +// DescribeType API operation. +type DescribeTypeResponse struct { + *DescribeTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeType request. +func (r *DescribeTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DescribeTypeRegistration.go b/service/cloudformation/api_op_DescribeTypeRegistration.go new file mode 100644 index 00000000000..97b89ebb014 --- /dev/null +++ b/service/cloudformation/api_op_DescribeTypeRegistration.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
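A rough sketch of reading back a registered resource type with the DescribeType operation above, assuming svc is a *cloudformation.Client and using a hypothetical private type name; the RegistryTypeResource constant comes from the enums added earlier in this diff.

// Describe the default version of a registered type and report how it can
// be provisioned. Assumes imports "context", "fmt",
// "github.com/aws/aws-sdk-go-v2/aws" and
// "github.com/aws/aws-sdk-go-v2/service/cloudformation".
func describeRegisteredType(ctx context.Context, svc *cloudformation.Client) error {
	req := svc.DescribeTypeRequest(&cloudformation.DescribeTypeInput{
		Type:     cloudformation.RegistryTypeResource,
		TypeName: aws.String("Example::Service::Resource"), // hypothetical type name
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	if resp.Arn != nil {
		fmt.Println("type ARN:", *resp.Arn)
	}
	fmt.Println("provisioning type:", resp.ProvisioningType)
	fmt.Println("visibility:", resp.Visibility)
	return nil
}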
+ +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeTypeRegistrationInput struct { + _ struct{} `type:"structure"` + + // The identifier for this registration request. + // + // This registration token is generated by CloudFormation when you initiate + // a registration request using RegisterType. + // + // RegistrationToken is a required field + RegistrationToken *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTypeRegistrationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTypeRegistrationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTypeRegistrationInput"} + + if s.RegistrationToken == nil { + invalidParams.Add(aws.NewErrParamRequired("RegistrationToken")) + } + if s.RegistrationToken != nil && len(*s.RegistrationToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RegistrationToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeTypeRegistrationOutput struct { + _ struct{} `type:"structure"` + + // The description of the type registration request. + Description *string `min:"1" type:"string"` + + // The current status of the type registration request. + ProgressStatus RegistrationStatus `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type being registered. + // + // For registration requests with a ProgressStatus other than COMPLETE, this + // will be null. + TypeArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of this specific version of the type being + // registered. + // + // For registration requests with a ProgressStatus other than COMPLETE, this + // will be null. + TypeVersionArn *string `type:"string"` +} + +// String returns the string representation +func (s DescribeTypeRegistrationOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeTypeRegistration = "DescribeTypeRegistration" + +// DescribeTypeRegistrationRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns information about a type's registration, including its current status +// and type and version identifiers. +// +// When you initiate a registration request using RegisterType, you can then +// use DescribeTypeRegistration to monitor the progress of that registration +// request. +// +// Once the registration request has completed, use DescribeType to return detailed +// information about a type. +// +// // Example sending a request using DescribeTypeRegistrationRequest.
+// req := client.DescribeTypeRegistrationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeTypeRegistration +func (c *Client) DescribeTypeRegistrationRequest(input *DescribeTypeRegistrationInput) DescribeTypeRegistrationRequest { + op := &aws.Operation{ + Name: opDescribeTypeRegistration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTypeRegistrationInput{} + } + + req := c.newRequest(op, input, &DescribeTypeRegistrationOutput{}) + return DescribeTypeRegistrationRequest{Request: req, Input: input, Copy: c.DescribeTypeRegistrationRequest} +} + +// DescribeTypeRegistrationRequest is the request type for the +// DescribeTypeRegistration API operation. +type DescribeTypeRegistrationRequest struct { + *aws.Request + Input *DescribeTypeRegistrationInput + Copy func(*DescribeTypeRegistrationInput) DescribeTypeRegistrationRequest +} + +// Send marshals and sends the DescribeTypeRegistration API request. +func (r DescribeTypeRegistrationRequest) Send(ctx context.Context) (*DescribeTypeRegistrationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTypeRegistrationResponse{ + DescribeTypeRegistrationOutput: r.Request.Data.(*DescribeTypeRegistrationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTypeRegistrationResponse is the response type for the +// DescribeTypeRegistration API operation. +type DescribeTypeRegistrationResponse struct { + *DescribeTypeRegistrationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeTypeRegistration request. +func (r *DescribeTypeRegistrationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_DetectStackSetDrift.go b/service/cloudformation/api_op_DetectStackSetDrift.go new file mode 100644 index 00000000000..4c791495107 --- /dev/null +++ b/service/cloudformation/api_op_DetectStackSetDrift.go @@ -0,0 +1,171 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DetectStackSetDriftInput struct { + _ struct{} `type:"structure"` + + // The ID of the stack set operation. + OperationId *string `min:"1" type:"string" idempotencyToken:"true"` + + // The user-specified preferences for how AWS CloudFormation performs a stack + // set operation. + // + // For more information on maximum concurrent accounts and failure tolerance, + // see Stack set operation options (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-ops-options). + OperationPreferences *StackSetOperationPreferences `type:"structure"` + + // The name of the stack set on which to perform the drift detection operation. + // + // StackSetName is a required field + StackSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DetectStackSetDriftInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
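+//
+// Illustrative sketch (not part of the generated code), assuming an
+// already-constructed *cloudformation.Client named client; the stack set name
+// is a placeholder. It starts drift detection for a stack set and prints the
+// operation ID, which the operation doc below says to pass to
+// DescribeStackSetOperation to monitor progress.
+//
+//    driftOut, err := client.DetectStackSetDriftRequest(&cloudformation.DetectStackSetDriftInput{
+//        StackSetName: aws.String("example-stack-set"),
+//    }).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println(aws.StringValue(driftOut.OperationId))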
+func (s *DetectStackSetDriftInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DetectStackSetDriftInput"} + if s.OperationId != nil && len(*s.OperationId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OperationId", 1)) + } + + if s.StackSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("StackSetName")) + } + if s.OperationPreferences != nil { + if err := s.OperationPreferences.Validate(); err != nil { + invalidParams.AddNested("OperationPreferences", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetectStackSetDriftOutput struct { + _ struct{} `type:"structure"` + + // The ID of the drift detection stack set operation. + // + // you can use this operation id with DescribeStackSetOperation to monitor the + // progress of the drift detection operation. + OperationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DetectStackSetDriftOutput) String() string { + return awsutil.Prettify(s) +} + +const opDetectStackSetDrift = "DetectStackSetDrift" + +// DetectStackSetDriftRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Detect drift on a stack set. When CloudFormation performs drift detection +// on a stack set, it performs drift detection on the stack associated with +// each stack instance in the stack set. For more information, see How CloudFormation +// Performs Drift Detection on a Stack Set (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html). +// +// DetectStackSetDrift returns the OperationId of the stack set drift detection +// operation. Use this operation id with DescribeStackSetOperation to monitor +// the progress of the drift detection operation. The drift detection operation +// may take some time, depending on the number of stack instances included in +// the stack set, as well as the number of resources included in each stack. +// +// Once the operation has completed, use the following actions to return drift +// information: +// +// * Use DescribeStackSet to return detailed informaiton about the stack +// set, including detailed information about the last completed drift operation +// performed on the stack set. (Information about drift operations that are +// in progress is not included.) +// +// * Use ListStackInstances to return a list of stack instances belonging +// to the stack set, including the drift status and last drift time checked +// of each instance. +// +// * Use DescribeStackInstance to return detailed information about a specific +// stack instance, including its drift status and last drift time checked. +// +// For more information on performing a drift detection operation on a stack +// set, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html). +// +// You can only run a single drift detection operation on a given stack set +// at one time. +// +// To stop a drift detection stack set operation, use StopStackSetOperation . +// +// // Example sending a request using DetectStackSetDriftRequest. 
+// req := client.DetectStackSetDriftRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DetectStackSetDrift +func (c *Client) DetectStackSetDriftRequest(input *DetectStackSetDriftInput) DetectStackSetDriftRequest { + op := &aws.Operation{ + Name: opDetectStackSetDrift, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetectStackSetDriftInput{} + } + + req := c.newRequest(op, input, &DetectStackSetDriftOutput{}) + return DetectStackSetDriftRequest{Request: req, Input: input, Copy: c.DetectStackSetDriftRequest} +} + +// DetectStackSetDriftRequest is the request type for the +// DetectStackSetDrift API operation. +type DetectStackSetDriftRequest struct { + *aws.Request + Input *DetectStackSetDriftInput + Copy func(*DetectStackSetDriftInput) DetectStackSetDriftRequest +} + +// Send marshals and sends the DetectStackSetDrift API request. +func (r DetectStackSetDriftRequest) Send(ctx context.Context) (*DetectStackSetDriftResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DetectStackSetDriftResponse{ + DetectStackSetDriftOutput: r.Request.Data.(*DetectStackSetDriftOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DetectStackSetDriftResponse is the response type for the +// DetectStackSetDrift API operation. +type DetectStackSetDriftResponse struct { + *DetectStackSetDriftOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DetectStackSetDrift request. +func (r *DetectStackSetDriftResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypeRegistrations.go b/service/cloudformation/api_op_ListTypeRegistrations.go new file mode 100644 index 00000000000..628e0c8a71d --- /dev/null +++ b/service/cloudformation/api_op_ListTypeRegistrations.go @@ -0,0 +1,211 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypeRegistrationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The current status of the type registration request. + RegistrationStatusFilter RegistrationStatus `type:"string" enum:"true"` + + // The kind of type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeArn *string `type:"string"` + + // The name of the type. 
+ // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s ListTypeRegistrationsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTypeRegistrationsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypeRegistrationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypeRegistrationsOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of type registration tokens. + // + // Use DescribeTypeRegistration to return detailed information about a type + // registration request. + RegistrationTokenList []string `type:"list"` +} + +// String returns the string representation +func (s ListTypeRegistrationsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypeRegistrations = "ListTypeRegistrations" + +// ListTypeRegistrationsRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns a list of registration tokens for the specified type. +// +// // Example sending a request using ListTypeRegistrationsRequest. +// req := client.ListTypeRegistrationsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypeRegistrations +func (c *Client) ListTypeRegistrationsRequest(input *ListTypeRegistrationsInput) ListTypeRegistrationsRequest { + op := &aws.Operation{ + Name: opListTypeRegistrations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypeRegistrationsInput{} + } + + req := c.newRequest(op, input, &ListTypeRegistrationsOutput{}) + return ListTypeRegistrationsRequest{Request: req, Input: input, Copy: c.ListTypeRegistrationsRequest} +} + +// ListTypeRegistrationsRequest is the request type for the +// ListTypeRegistrations API operation. +type ListTypeRegistrationsRequest struct { + *aws.Request + Input *ListTypeRegistrationsInput + Copy func(*ListTypeRegistrationsInput) ListTypeRegistrationsRequest +} + +// Send marshals and sends the ListTypeRegistrations API request. 
+func (r ListTypeRegistrationsRequest) Send(ctx context.Context) (*ListTypeRegistrationsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypeRegistrationsResponse{ + ListTypeRegistrationsOutput: r.Request.Data.(*ListTypeRegistrationsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypeRegistrationsRequestPaginator returns a paginator for ListTypeRegistrations. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTypeRegistrationsRequest(input) +// p := cloudformation.NewListTypeRegistrationsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypeRegistrationsPaginator(req ListTypeRegistrationsRequest) ListTypeRegistrationsPaginator { + return ListTypeRegistrationsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypeRegistrationsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypeRegistrationsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTypeRegistrationsPaginator struct { + aws.Pager +} + +func (p *ListTypeRegistrationsPaginator) CurrentPage() *ListTypeRegistrationsOutput { + return p.Pager.CurrentPage().(*ListTypeRegistrationsOutput) +} + +// ListTypeRegistrationsResponse is the response type for the +// ListTypeRegistrations API operation. +type ListTypeRegistrationsResponse struct { + *ListTypeRegistrationsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypeRegistrations request. +func (r *ListTypeRegistrationsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypeVersions.go b/service/cloudformation/api_op_ListTypeVersions.go new file mode 100644 index 00000000000..7559d037af2 --- /dev/null +++ b/service/cloudformation/api_op_ListTypeVersions.go @@ -0,0 +1,219 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypeVersionsInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type for which you want version summary + // information. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The deprecation status of the type versions that you want to get summary + // information about. + // + // Valid values include: + // + // * LIVE: The type version is registered and can be used in CloudFormation + // operations, dependent on its provisioning behavior and visibility scope. + // + // * DEPRECATED: The type version has been deregistered and can no longer + // be used in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The maximum number of results to be returned with a single call. 
If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The kind of the type. + // + // Currently the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type for which you want version summary information. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s ListTypeVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTypeVersionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypeVersionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypeVersionsOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. + NextToken *string `min:"1" type:"string"` + + // A list of TypeVersionSummary structures that contain information about the + // specified type's versions. + TypeVersionSummaries []TypeVersionSummary `type:"list"` +} + +// String returns the string representation +func (s ListTypeVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypeVersions = "ListTypeVersions" + +// ListTypeVersionsRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns summary information about the versions of a type. +// +// // Example sending a request using ListTypeVersionsRequest. 
+// req := client.ListTypeVersionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypeVersions +func (c *Client) ListTypeVersionsRequest(input *ListTypeVersionsInput) ListTypeVersionsRequest { + op := &aws.Operation{ + Name: opListTypeVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypeVersionsInput{} + } + + req := c.newRequest(op, input, &ListTypeVersionsOutput{}) + return ListTypeVersionsRequest{Request: req, Input: input, Copy: c.ListTypeVersionsRequest} +} + +// ListTypeVersionsRequest is the request type for the +// ListTypeVersions API operation. +type ListTypeVersionsRequest struct { + *aws.Request + Input *ListTypeVersionsInput + Copy func(*ListTypeVersionsInput) ListTypeVersionsRequest +} + +// Send marshals and sends the ListTypeVersions API request. +func (r ListTypeVersionsRequest) Send(ctx context.Context) (*ListTypeVersionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypeVersionsResponse{ + ListTypeVersionsOutput: r.Request.Data.(*ListTypeVersionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypeVersionsRequestPaginator returns a paginator for ListTypeVersions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTypeVersionsRequest(input) +// p := cloudformation.NewListTypeVersionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypeVersionsPaginator(req ListTypeVersionsRequest) ListTypeVersionsPaginator { + return ListTypeVersionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypeVersionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypeVersionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTypeVersionsPaginator struct { + aws.Pager +} + +func (p *ListTypeVersionsPaginator) CurrentPage() *ListTypeVersionsOutput { + return p.Pager.CurrentPage().(*ListTypeVersionsOutput) +} + +// ListTypeVersionsResponse is the response type for the +// ListTypeVersions API operation. +type ListTypeVersionsResponse struct { + *ListTypeVersionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypeVersions request. +func (r *ListTypeVersionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_ListTypes.go b/service/cloudformation/api_op_ListTypes.go new file mode 100644 index 00000000000..76b7b7d6deb --- /dev/null +++ b/service/cloudformation/api_op_ListTypes.go @@ -0,0 +1,227 @@ +// Code generated by private/model/cli/gen-api/main.go. 
DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListTypesInput struct { + _ struct{} `type:"structure"` + + // The deprecation status of the types that you want to get summary information + // about. + // + // Valid values include: + // + // * LIVE: The type is registered for use in CloudFormation operations. + // + // * DEPRECATED: The type has been deregistered and can no longer be used + // in CloudFormation operations. + DeprecatedStatus DeprecatedStatus `type:"string" enum:"true"` + + // The maximum number of results to be returned with a single call. If the number + // of available results exceeds this maximum, the response includes a NextToken + // value that you can assign to the NextToken request parameter to get the next + // set of results. + MaxResults *int64 `min:"1" type:"integer"` + + // If the previous paginated request didn't return all of the remaining results, + // the response object's NextToken parameter value is set to a token. To retrieve + // the next set of results, call this action again and assign that token to + // the request object's NextToken parameter. If there are no remaining results, + // the previous response object's NextToken parameter is set to null. + NextToken *string `min:"1" type:"string"` + + // The provisioning behavior of the type. AWS CloudFormation determines the + // provisioning type during registration, based on the types of handlers in + // the schema handler package submitted. + // + // Valid values include: + // + // * FULLY_MUTABLE: The type includes an update handler to process updates + // to the type during stack update operations. + // + // * IMMUTABLE: The type does not include an update handler, so the type + // cannot be updated and must instead be replaced during stack update operations. + // + // * NON_PROVISIONABLE: The type does not include create, read, and delete + // handlers, and therefore cannot actually be provisioned. + ProvisioningType ProvisioningType `type:"string" enum:"true"` + + // The scope at which the type is visible and usable in CloudFormation operations. + // + // Valid values include: + // + // * PRIVATE: The type is only visible and usable within the account in which + // it is registered. Currently, AWS CloudFormation marks any types you create + // as PRIVATE. + // + // * PUBLIC: The type is publically visible and usable within any Amazon + // account. + Visibility Visibility `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ListTypesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTypesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTypesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTypesOutput struct { + _ struct{} `type:"structure"` + + // If the request doesn't return all of the remaining results, NextToken is + // set to a token. To retrieve the next set of results, call this action again + // and assign that token to the request object's NextToken parameter. If the + // request returns all results, NextToken is set to null. 
+ NextToken *string `min:"1" type:"string"` + + // A list of TypeSummary structures that contain information about the specified + // types. + TypeSummaries []TypeSummary `type:"list"` +} + +// String returns the string representation +func (s ListTypesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListTypes = "ListTypes" + +// ListTypesRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Returns summary information about types that have been registered with CloudFormation. +// +// // Example sending a request using ListTypesRequest. +// req := client.ListTypesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/ListTypes +func (c *Client) ListTypesRequest(input *ListTypesInput) ListTypesRequest { + op := &aws.Operation{ + Name: opListTypes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTypesInput{} + } + + req := c.newRequest(op, input, &ListTypesOutput{}) + return ListTypesRequest{Request: req, Input: input, Copy: c.ListTypesRequest} +} + +// ListTypesRequest is the request type for the +// ListTypes API operation. +type ListTypesRequest struct { + *aws.Request + Input *ListTypesInput + Copy func(*ListTypesInput) ListTypesRequest +} + +// Send marshals and sends the ListTypes API request. +func (r ListTypesRequest) Send(ctx context.Context) (*ListTypesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTypesResponse{ + ListTypesOutput: r.Request.Data.(*ListTypesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTypesRequestPaginator returns a paginator for ListTypes. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTypesRequest(input) +// p := cloudformation.NewListTypesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTypesPaginator(req ListTypesRequest) ListTypesPaginator { + return ListTypesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTypesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTypesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTypesPaginator struct { + aws.Pager +} + +func (p *ListTypesPaginator) CurrentPage() *ListTypesOutput { + return p.Pager.CurrentPage().(*ListTypesOutput) +} + +// ListTypesResponse is the response type for the +// ListTypes API operation. +type ListTypesResponse struct { + *ListTypesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTypes request. 
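+//
+// Illustrative sketch (not part of the generated code), assuming an
+// already-constructed *cloudformation.Client named client. It drains every
+// page with the ListTypes paginator defined above; the same pattern applies to
+// the ListTypeVersions and ListTypeRegistrations paginators added in this
+// change. The Visibility value is written as a plain string literal here;
+// generated enum constants for it also exist.
+//
+//    req := client.ListTypesRequest(&cloudformation.ListTypesInput{
+//        Visibility: "PRIVATE",
+//    })
+//    p := cloudformation.NewListTypesPaginator(req)
+//    for p.Next(context.TODO()) {
+//        for _, summary := range p.CurrentPage().TypeSummaries {
+//            fmt.Println(aws.StringValue(summary.TypeName))
+//        }
+//    }
+//    if err := p.Err(); err != nil {
+//        return err
+//    }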
+func (r *ListTypesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_RecordHandlerProgress.go b/service/cloudformation/api_op_RecordHandlerProgress.go new file mode 100644 index 00000000000..175310ea9aa --- /dev/null +++ b/service/cloudformation/api_op_RecordHandlerProgress.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type RecordHandlerProgressInput struct { + _ struct{} `type:"structure"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // + // BearerToken is a required field + BearerToken *string `min:"1" type:"string" required:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ClientRequestToken *string `min:"1" type:"string"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + CurrentOperationStatus OperationStatus `type:"string" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ErrorCode HandlerErrorCode `type:"string" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + // + // OperationStatus is a required field + OperationStatus OperationStatus `type:"string" required:"true" enum:"true"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + ResourceModel *string `min:"1" type:"string"` + + // Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). + StatusMessage *string `type:"string"` +} + +// String returns the string representation +func (s RecordHandlerProgressInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RecordHandlerProgressInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RecordHandlerProgressInput"} + + if s.BearerToken == nil { + invalidParams.Add(aws.NewErrParamRequired("BearerToken")) + } + if s.BearerToken != nil && len(*s.BearerToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("BearerToken", 1)) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 1)) + } + if len(s.OperationStatus) == 0 { + invalidParams.Add(aws.NewErrParamRequired("OperationStatus")) + } + if s.ResourceModel != nil && len(*s.ResourceModel) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceModel", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RecordHandlerProgressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RecordHandlerProgressOutput) String() string { + return awsutil.Prettify(s) +} + +const opRecordHandlerProgress = "RecordHandlerProgress" + +// RecordHandlerProgressRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Reports progress of a resource handler to CloudFormation. +// +// Reserved for use by the CloudFormation CLI (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/what-is-cloudformation-cli.html). +// Do not use this API in your code. +// +// // Example sending a request using RecordHandlerProgressRequest. +// req := client.RecordHandlerProgressRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/RecordHandlerProgress +func (c *Client) RecordHandlerProgressRequest(input *RecordHandlerProgressInput) RecordHandlerProgressRequest { + op := &aws.Operation{ + Name: opRecordHandlerProgress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RecordHandlerProgressInput{} + } + + req := c.newRequest(op, input, &RecordHandlerProgressOutput{}) + return RecordHandlerProgressRequest{Request: req, Input: input, Copy: c.RecordHandlerProgressRequest} +} + +// RecordHandlerProgressRequest is the request type for the +// RecordHandlerProgress API operation. +type RecordHandlerProgressRequest struct { + *aws.Request + Input *RecordHandlerProgressInput + Copy func(*RecordHandlerProgressInput) RecordHandlerProgressRequest +} + +// Send marshals and sends the RecordHandlerProgress API request. +func (r RecordHandlerProgressRequest) Send(ctx context.Context) (*RecordHandlerProgressResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RecordHandlerProgressResponse{ + RecordHandlerProgressOutput: r.Request.Data.(*RecordHandlerProgressOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RecordHandlerProgressResponse is the response type for the +// RecordHandlerProgress API operation. +type RecordHandlerProgressResponse struct { + *RecordHandlerProgressOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RecordHandlerProgress request. 
+func (r *RecordHandlerProgressResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_RegisterType.go b/service/cloudformation/api_op_RegisterType.go new file mode 100644 index 00000000000..32a7356b78a --- /dev/null +++ b/service/cloudformation/api_op_RegisterType.go @@ -0,0 +1,208 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type RegisterTypeInput struct { + _ struct{} `type:"structure"` + + // A unique identifier that acts as an idempotency key for this registration + // request. Specifying a client request token prevents CloudFormation from generating + // more than one version of a type from the same registeration request, even + // if the request is submitted multiple times. + ClientRequestToken *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM execution role to use to register + // the type. If your resource type calls AWS APIs in any of its handlers, you + // must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) + // that includes the necessary permissions to call those AWS APIs, and provision + // that execution role in your account. CloudFormation then assumes that execution + // role to provide your resource type with the appropriate credentials. + ExecutionRoleArn *string `min:"1" type:"string"` + + // Specifies logging configuration information for a type. + LoggingConfig *LoggingConfig `type:"structure"` + + // A url to the S3 bucket containing the schema handler package that contains + // the schema, event handlers, and associated files for the type you want to + // register. + // + // For information on generating a schema handler package for the type you want + // to register, see submit (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-type-cli-submit.html) + // in the CloudFormation CLI User Guide. + // + // SchemaHandlerPackage is a required field + SchemaHandlerPackage *string `min:"1" type:"string" required:"true"` + + // The kind of type. + // + // Currently, the only valid value is RESOURCE. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type being registered. + // + // We recommend that type names adhere to the following pattern: company_or_organization::service::type. + // + // The following organization namespaces are reserved and cannot be used in + // your resource type names: + // + // * Alexa + // + // * AMZN + // + // * Amazon + // + // * AWS + // + // * Custom + // + // * Dev + // + // TypeName is a required field + TypeName *string `min:"10" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterTypeInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterTypeInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RegisterTypeInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ExecutionRoleArn", 1)) + } + + if s.SchemaHandlerPackage == nil { + invalidParams.Add(aws.NewErrParamRequired("SchemaHandlerPackage")) + } + if s.SchemaHandlerPackage != nil && len(*s.SchemaHandlerPackage) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SchemaHandlerPackage", 1)) + } + + if s.TypeName == nil { + invalidParams.Add(aws.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.LoggingConfig != nil { + if err := s.LoggingConfig.Validate(); err != nil { + invalidParams.AddNested("LoggingConfig", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterTypeOutput struct { + _ struct{} `type:"structure"` + + // The identifier for this registration request. + // + // Use this registration token when calling DescribeTypeRegistration , which + // returns information about the status and IDs of the type registration. + RegistrationToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s RegisterTypeOutput) String() string { + return awsutil.Prettify(s) +} + +const opRegisterType = "RegisterType" + +// RegisterTypeRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Registers a type with the CloudFormation service. Registering a type makes +// it available for use in CloudFormation templates in your AWS account, and +// includes: +// +// * Validating the resource schema +// +// * Determining which handlers have been specified for the resource +// +// * Making the resource type available for use in your account +// +// For more information on how to develop types and ready them for registeration, +// see Creating Resource Providers (cloudformation-cli/latest/userguide/resource-types.html) +// in the CloudFormation CLI User Guide. +// +// Once you have initiated a registration request using RegisterType , you can +// use DescribeTypeRegistration to monitor the progress of the registration +// request. +// +// // Example sending a request using RegisterTypeRequest. +// req := client.RegisterTypeRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/RegisterType +func (c *Client) RegisterTypeRequest(input *RegisterTypeInput) RegisterTypeRequest { + op := &aws.Operation{ + Name: opRegisterType, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterTypeInput{} + } + + req := c.newRequest(op, input, &RegisterTypeOutput{}) + return RegisterTypeRequest{Request: req, Input: input, Copy: c.RegisterTypeRequest} +} + +// RegisterTypeRequest is the request type for the +// RegisterType API operation. +type RegisterTypeRequest struct { + *aws.Request + Input *RegisterTypeInput + Copy func(*RegisterTypeInput) RegisterTypeRequest +} + +// Send marshals and sends the RegisterType API request. 
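+//
+// Illustrative sketch (not part of the generated code) of the end-to-end
+// registration flow, assuming an already-constructed *cloudformation.Client
+// named client; the type name, S3 URL, and version ID are placeholders. It
+// submits RegisterType, blocks on the WaitUntilTypeRegistrationComplete waiter
+// added to api_waiters.go later in this diff, then promotes the new version
+// with SetTypeDefaultVersion.
+//
+//    regOut, err := client.RegisterTypeRequest(&cloudformation.RegisterTypeInput{
+//        TypeName:             aws.String("MyCompany::Example::Resource"),
+//        SchemaHandlerPackage: aws.String("s3://example-bucket/example-handler-package.zip"),
+//    }).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    err = client.WaitUntilTypeRegistrationComplete(context.TODO(), &cloudformation.DescribeTypeRegistrationInput{
+//        RegistrationToken: regOut.RegistrationToken,
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    _, err = client.SetTypeDefaultVersionRequest(&cloudformation.SetTypeDefaultVersionInput{
+//        TypeName:  aws.String("MyCompany::Example::Resource"),
+//        Type:      "RESOURCE",
+//        VersionId: aws.String("00000001"), // placeholder version ID
+//    }).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }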
+func (r RegisterTypeRequest) Send(ctx context.Context) (*RegisterTypeResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RegisterTypeResponse{ + RegisterTypeOutput: r.Request.Data.(*RegisterTypeOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RegisterTypeResponse is the response type for the +// RegisterType API operation. +type RegisterTypeResponse struct { + *RegisterTypeOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RegisterType request. +func (r *RegisterTypeResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_op_SetTypeDefaultVersion.go b/service/cloudformation/api_op_SetTypeDefaultVersion.go new file mode 100644 index 00000000000..cd4dde3a1d7 --- /dev/null +++ b/service/cloudformation/api_op_SetTypeDefaultVersion.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudformation + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type SetTypeDefaultVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type for which you want version summary + // information. + // + // Conditional: You must specify TypeName or Arn. + Arn *string `type:"string"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + // + // Conditional: You must specify TypeName or Arn. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SetTypeDefaultVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTypeDefaultVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SetTypeDefaultVersionInput"} + if s.TypeName != nil && len(*s.TypeName) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 10)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SetTypeDefaultVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetTypeDefaultVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opSetTypeDefaultVersion = "SetTypeDefaultVersion" + +// SetTypeDefaultVersionRequest returns a request value for making API operation for +// AWS CloudFormation. +// +// Specify the default version of a type. The default version of a type will +// be used in CloudFormation operations. +// +// // Example sending a request using SetTypeDefaultVersionRequest. 
+// req := client.SetTypeDefaultVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/SetTypeDefaultVersion +func (c *Client) SetTypeDefaultVersionRequest(input *SetTypeDefaultVersionInput) SetTypeDefaultVersionRequest { + op := &aws.Operation{ + Name: opSetTypeDefaultVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetTypeDefaultVersionInput{} + } + + req := c.newRequest(op, input, &SetTypeDefaultVersionOutput{}) + return SetTypeDefaultVersionRequest{Request: req, Input: input, Copy: c.SetTypeDefaultVersionRequest} +} + +// SetTypeDefaultVersionRequest is the request type for the +// SetTypeDefaultVersion API operation. +type SetTypeDefaultVersionRequest struct { + *aws.Request + Input *SetTypeDefaultVersionInput + Copy func(*SetTypeDefaultVersionInput) SetTypeDefaultVersionRequest +} + +// Send marshals and sends the SetTypeDefaultVersion API request. +func (r SetTypeDefaultVersionRequest) Send(ctx context.Context) (*SetTypeDefaultVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &SetTypeDefaultVersionResponse{ + SetTypeDefaultVersionOutput: r.Request.Data.(*SetTypeDefaultVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// SetTypeDefaultVersionResponse is the response type for the +// SetTypeDefaultVersion API operation. +type SetTypeDefaultVersionResponse struct { + *SetTypeDefaultVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// SetTypeDefaultVersion request. +func (r *SetTypeDefaultVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudformation/api_types.go b/service/cloudformation/api_types.go index ea9f28a6ccb..cbc1a627640 100644 --- a/service/cloudformation/api_types.go +++ b/service/cloudformation/api_types.go @@ -178,6 +178,52 @@ func (s Export) String() string { return awsutil.Prettify(s) } +// Contains logging configuration information for a type. +type LoggingConfig struct { + _ struct{} `type:"structure"` + + // The Amazon CloudWatch log group to which CloudFormation sends error logging + // information when invoking the type's handlers. + // + // LogGroupName is a required field + LogGroupName *string `min:"1" type:"string" required:"true"` + + // The ARN of the role that CloudFormation should assume when sending log entries + // to CloudWatch logs. + // + // LogRoleArn is a required field + LogRoleArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingConfig) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
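+//
+// Illustrative sketch (not part of the generated code) of attaching a
+// LoggingConfig to a RegisterTypeInput so handler errors are delivered to
+// CloudWatch Logs; the log group name and role ARN are placeholders.
+//
+//    input := &cloudformation.RegisterTypeInput{
+//        TypeName:             aws.String("MyCompany::Example::Resource"),
+//        SchemaHandlerPackage: aws.String("s3://example-bucket/example-handler-package.zip"),
+//        LoggingConfig: &cloudformation.LoggingConfig{
+//            LogGroupName: aws.String("example-type-handler-logs"),
+//            LogRoleArn:   aws.String("arn:aws:iam::123456789012:role/example-log-delivery-role"),
+//        },
+//    }
+//    // Validate performs client-side checks of the required fields before any request is sent.
+//    if err := input.Validate(); err != nil {
+//        return err
+//    }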
+func (s *LoggingConfig) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LoggingConfig"} + + if s.LogGroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("LogGroupName")) + } + if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogGroupName", 1)) + } + + if s.LogRoleArn == nil { + invalidParams.Add(aws.NewErrParamRequired("LogRoleArn")) + } + if s.LogRoleArn != nil && len(*s.LogRoleArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogRoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The Output data type. type Output struct { _ struct{} `type:"structure"` @@ -936,6 +982,28 @@ type StackInstance struct { // The name of the AWS account that the stack instance is associated with. Account *string `type:"string"` + // Status of the stack instance's actual configuration compared to the expected + // template and parameter configuration of the stack set to which it belongs. + // + // * DRIFTED: The stack differs from the expected template and parameter + // configuration of the stack set to which it belongs. A stack instance is + // considered to have drifted if one or more of the resources in the associated + // stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked if the stack instance + // differs from its expected stack set configuration. + // + // * IN_SYNC: The stack instance's actual configuration matches its expected + // stack set configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack instance. This value will be NULL for any stack instance on + // which drift detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // A list of parameters from the stack set template whose values have been overridden // in this stack instance. ParameterOverrides []Parameter `type:"list"` @@ -984,6 +1052,28 @@ type StackInstanceSummary struct { // The name of the AWS account that the stack instance is associated with. Account *string `type:"string"` + // Status of the stack instance's actual configuration compared to the expected + // template and parameter configuration of the stack set to which it belongs. + // + // * DRIFTED: The stack differs from the expected template and parameter + // configuration of the stack set to which it belongs. A stack instance is + // considered to have drifted if one or more of the resources in the associated + // stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked if the stack instance + // differs from its expected stack set configuration. + // + // * IN_SYNC: The stack instance's actual configuration matches its expected + // stack set configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack instance. This value will be NULL for any stack instance on + // which drift detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // The name of the AWS region that the stack instance is associated with. Region *string `type:"string"` @@ -1374,6 +1464,13 @@ type StackSet struct { // The Amazon Resource Number (ARN) of the stack set. 
StackSetARN *string `type:"string"` + // Detailed information about the drift status of the stack set. + // + // For stack sets, contains information about the last completed drift operation + // performed on the stack set. Information about drift operations currently + // in progress is not included. + StackSetDriftDetectionDetails *StackSetDriftDetectionDetails `type:"structure"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1397,6 +1494,92 @@ func (s StackSet) String() string { return awsutil.Prettify(s) } +// Detailed information about the drift status of the stack set. +// +// For stack sets, contains information about the last completed drift operation +// performed on the stack set. Information about drift operations in-progress +// is not included. +// +// For stack set operations, includes information about drift operations currently +// being performed on the stack set. +// +// For more information, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html) +// in the AWS CloudFormation User Guide. +type StackSetDriftDetectionDetails struct { + _ struct{} `type:"structure"` + + // The status of the stack set drift detection operation. + // + // * COMPLETED: The drift detection operation completed without failing on + // any stack instances. + // + // * FAILED: The drift detection operation exceeded the specified failure + // tolerance. + // + // * PARTIAL_SUCCESS: The drift detection operation completed without exceeding + // the failure tolerance for the operation. + // + // * IN_PROGRESS: The drift detection operation is currently being performed. + // + // * STOPPED: The user has cancelled the drift detection operation. + DriftDetectionStatus StackSetDriftDetectionStatus `type:"string" enum:"true"` + + // Status of the stack set's actual configuration compared to its expected template + // and parameter configuration. A stack set is considered to have drifted if + // one or more of its stack instances have drifted from their expected template + // and parameter configuration. + // + // * DRIFTED: One or more of the stack instances belonging to the stack set + // stack differs from the expected template and parameter configuration. + // A stack instance is considered to have drifted if one or more of the resources + // in the associated stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // + // * IN_SYNC: All of the stack instances belonging to the stack set stack + // match from the expected template and parameter configuration. + DriftStatus StackSetDriftStatus `type:"string" enum:"true"` + + // The number of stack instances that have drifted from the expected template + // and parameter configuration of the stack set. A stack instance is considered + // to have drifted if one or more of the resources in the associated stack do + // not match their expected configuration. + DriftedStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances for which the drift detection operation failed. + FailedStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances that are currently being checked for drift. + InProgressStackInstancesCount *int64 `type:"integer"` + + // The number of stack instances which match the expected template and parameter + // configuration of the stack set. 
+ InSyncStackInstancesCount *int64 `type:"integer"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack set. This value will be NULL for any stack set on which drift + // detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + + // The total number of stack instances belonging to this stack set. + // + // The total number of stack instances is equal to the total of: + // + // * Stack instances that match the stack set configuration. + // + // * Stack instances that have drifted from the stack set configuration. + // + // * Stack instances where the drift detection operation has failed. + // + // * Stack instances currently being checked for drift. + TotalStackInstancesCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s StackSetDriftDetectionDetails) String() string { + return awsutil.Prettify(s) +} + // The structure that contains information about a stack set operation. type StackSetOperation struct { _ struct{} `type:"structure"` @@ -1446,6 +1629,17 @@ type StackSetOperation struct { // stack to a new stack set. RetainStacks *bool `type:"boolean"` + // Detailed information about the drift status of the stack set. This includes + // information about drift operations currently being performed on the stack + // set. + // + // this information will only be present for stack set operations whose Action + // type is DETECT_DRIFT. + // + // For more information, see Detecting Unmanaged Changes in Stack Sets (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-drift.html) + // in the AWS CloudFormation User Guide. + StackSetDriftDetectionDetails *StackSetDriftDetectionDetails `type:"structure"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1662,6 +1856,29 @@ type StackSetSummary struct { // or updated. Description *string `min:"1" type:"string"` + // Status of the stack set's actual configuration compared to its expected template + // and parameter configuration. A stack set is considered to have drifted if + // one or more of its stack instances have drifted from their expected template + // and parameter configuration. + // + // * DRIFTED: One or more of the stack instances belonging to the stack set + // stack differs from the expected template and parameter configuration. + // A stack instance is considered to have drifted if one or more of the resources + // in the associated stack have drifted. + // + // * NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // + // * IN_SYNC: All of the stack instances belonging to the stack set stack + // match from the expected template and parameter configuration. + // + // * UNKNOWN: This value is reserved for future use. + DriftStatus StackDriftStatus `type:"string" enum:"true"` + + // Most recent time when CloudFormation performed a drift detection operation + // on the stack set. This value will be NULL for any stack set on which drift + // detection has not yet been performed. + LastDriftCheckTimestamp *time.Time `type:"timestamp"` + // The ID of the stack set. StackSetId *string `type:"string"` @@ -1809,3 +2026,65 @@ type TemplateParameter struct { func (s TemplateParameter) String() string { return awsutil.Prettify(s) } + +// Contains summary information about the specified CloudFormation type. +type TypeSummary struct { + _ struct{} `type:"structure"` + + // The ID of the default version of the type. 
The default version is used when + // the type version is not specified. + // + // To set the default version of a type, use SetTypeDefaultVersion . + DefaultVersionId *string `min:"1" type:"string"` + + // The description of the type. + Description *string `min:"1" type:"string"` + + // When the current default version of the type was registered. + LastUpdated *time.Time `type:"timestamp"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the type. + TypeArn *string `type:"string"` + + // The name of the type. + TypeName *string `min:"10" type:"string"` +} + +// String returns the string representation +func (s TypeSummary) String() string { + return awsutil.Prettify(s) +} + +// Contains summary information about a specific version of a CloudFormation +// type. +type TypeVersionSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the type version. + Arn *string `type:"string"` + + // The description of the type version. + Description *string `min:"1" type:"string"` + + // When the version was registered. + TimeCreated *time.Time `type:"timestamp"` + + // The kind of type. + Type RegistryType `type:"string" enum:"true"` + + // The name of the type. + TypeName *string `min:"10" type:"string"` + + // The ID of a specific version of the type. The version ID is the value at + // the end of the Amazon Resource Name (ARN) assigned to the type version when + // it is registered. + VersionId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TypeVersionSummary) String() string { + return awsutil.Prettify(s) +} diff --git a/service/cloudformation/api_waiters.go b/service/cloudformation/api_waiters.go index eac8e08f2f3..e705f3b236f 100644 --- a/service/cloudformation/api_waiters.go +++ b/service/cloudformation/api_waiters.go @@ -372,3 +372,47 @@ func (c *Client) WaitUntilStackUpdateComplete(ctx context.Context, input *Descri return w.Wait(ctx) } + +// WaitUntilTypeRegistrationComplete uses the AWS CloudFormation API operation +// DescribeTypeRegistration to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilTypeRegistrationComplete(ctx context.Context, input *DescribeTypeRegistrationInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilTypeRegistrationComplete", + MaxAttempts: 120, + Delay: aws.ConstantWaiterDelay(30 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "ProgressStatus", + Expected: "COMPLETE", + }, + { + State: aws.FailureWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "ProgressStatus", + Expected: "FAILED", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *DescribeTypeRegistrationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.DescribeTypeRegistrationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.Wait(ctx) +} diff --git a/service/cloudformation/cloudformationiface/interface.go b/service/cloudformation/cloudformationiface/interface.go index b5245a660dd..08e99255e2d 100644 --- a/service/cloudformation/cloudformationiface/interface.go +++ b/service/cloudformation/cloudformationiface/interface.go @@ -84,6 +84,8 @@ type ClientAPI interface { DeleteStackSetRequest(*cloudformation.DeleteStackSetInput) cloudformation.DeleteStackSetRequest + DeregisterTypeRequest(*cloudformation.DeregisterTypeInput) cloudformation.DeregisterTypeRequest + DescribeAccountLimitsRequest(*cloudformation.DescribeAccountLimitsInput) cloudformation.DescribeAccountLimitsRequest DescribeChangeSetRequest(*cloudformation.DescribeChangeSetInput) cloudformation.DescribeChangeSetRequest @@ -106,10 +108,16 @@ type ClientAPI interface { DescribeStacksRequest(*cloudformation.DescribeStacksInput) cloudformation.DescribeStacksRequest + DescribeTypeRequest(*cloudformation.DescribeTypeInput) cloudformation.DescribeTypeRequest + + DescribeTypeRegistrationRequest(*cloudformation.DescribeTypeRegistrationInput) cloudformation.DescribeTypeRegistrationRequest + DetectStackDriftRequest(*cloudformation.DetectStackDriftInput) cloudformation.DetectStackDriftRequest DetectStackResourceDriftRequest(*cloudformation.DetectStackResourceDriftInput) cloudformation.DetectStackResourceDriftRequest + DetectStackSetDriftRequest(*cloudformation.DetectStackSetDriftInput) cloudformation.DetectStackSetDriftRequest + EstimateTemplateCostRequest(*cloudformation.EstimateTemplateCostInput) cloudformation.EstimateTemplateCostRequest ExecuteChangeSetRequest(*cloudformation.ExecuteChangeSetInput) cloudformation.ExecuteChangeSetRequest @@ -138,8 +146,20 @@ type ClientAPI interface { ListStacksRequest(*cloudformation.ListStacksInput) cloudformation.ListStacksRequest + ListTypeRegistrationsRequest(*cloudformation.ListTypeRegistrationsInput) cloudformation.ListTypeRegistrationsRequest + + ListTypeVersionsRequest(*cloudformation.ListTypeVersionsInput) cloudformation.ListTypeVersionsRequest + + ListTypesRequest(*cloudformation.ListTypesInput) cloudformation.ListTypesRequest + + RecordHandlerProgressRequest(*cloudformation.RecordHandlerProgressInput) cloudformation.RecordHandlerProgressRequest + + RegisterTypeRequest(*cloudformation.RegisterTypeInput) cloudformation.RegisterTypeRequest + SetStackPolicyRequest(*cloudformation.SetStackPolicyInput) cloudformation.SetStackPolicyRequest + SetTypeDefaultVersionRequest(*cloudformation.SetTypeDefaultVersionInput) cloudformation.SetTypeDefaultVersionRequest + SignalResourceRequest(*cloudformation.SignalResourceInput) cloudformation.SignalResourceRequest StopStackSetOperationRequest(*cloudformation.StopStackSetOperationInput) cloudformation.StopStackSetOperationRequest @@ -165,6 +185,8 @@ type ClientAPI interface { WaitUntilStackImportComplete(context.Context, *cloudformation.DescribeStacksInput, ...aws.WaiterOption) error WaitUntilStackUpdateComplete(context.Context, *cloudformation.DescribeStacksInput, ...aws.WaiterOption) error + + WaitUntilTypeRegistrationComplete(context.Context, *cloudformation.DescribeTypeRegistrationInput, ...aws.WaiterOption) error } var _ ClientAPI = (*cloudformation.Client)(nil) diff --git a/service/cloudsearch/api_enums.go b/service/cloudsearch/api_enums.go index b3f562a6dea..082e9d75f8d 100644 --- a/service/cloudsearch/api_enums.go +++ b/service/cloudsearch/api_enums.go @@ -176,3 +176,21 @@ func (enum SuggesterFuzzyMatching) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] 
return append(b, enum...), nil } + +// The minimum required TLS version. +type TLSSecurityPolicy string + +// Enum values for TLSSecurityPolicy +const ( + TLSSecurityPolicyPolicyMinTls10201907 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" + TLSSecurityPolicyPolicyMinTls12201907 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" +) + +func (enum TLSSecurityPolicy) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TLSSecurityPolicy) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/cloudsearch/api_errors.go b/service/cloudsearch/api_errors.go index f0ed2f1e2fc..b807f29ab04 100644 --- a/service/cloudsearch/api_errors.go +++ b/service/cloudsearch/api_errors.go @@ -41,4 +41,10 @@ const ( // The request was rejected because it attempted to reference a resource that // does not exist. ErrCodeResourceNotFoundException = "ResourceNotFound" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // The request was rejected because it has invalid parameters. + ErrCodeValidationException = "ValidationException" ) diff --git a/service/cloudsearch/api_integ_test.go b/service/cloudsearch/api_integ_test.go new file mode 100644 index 00000000000..d582f55c799 --- /dev/null +++ b/service/cloudsearch/api_integ_test.go @@ -0,0 +1,63 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// +build integration + +package cloudsearch_test + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + "github.com/aws/aws-sdk-go-v2/internal/awstesting/integration" + "github.com/aws/aws-sdk-go-v2/service/cloudsearch" +) + +var _ aws.Config +var _ awserr.Error + +func TestInteg_00_DescribeDomains(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + cfg := integration.ConfigWithDefaultRegion("us-west-2") + svc := cloudsearch.New(cfg) + params := &cloudsearch.DescribeDomainsInput{} + + req := svc.DescribeDomainsRequest(params) + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) + _, err := req.Send(ctx) + if err != nil { + t.Errorf("expect no error, got %v", err) + } +} +func TestInteg_01_DescribeIndexFields(t *testing.T) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + cfg := integration.ConfigWithDefaultRegion("us-west-2") + svc := cloudsearch.New(cfg) + params := &cloudsearch.DescribeIndexFieldsInput{ + DomainName: aws.String("fakedomain"), + } + + req := svc.DescribeIndexFieldsRequest(params) + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) + _, err := req.Send(ctx) + if err == nil { + t.Fatalf("expect request to fail") + } + aerr, ok := err.(awserr.RequestFailure) + if !ok { + t.Fatalf("expect awserr, was %T", err) + } + if len(aerr.Code()) == 0 { + t.Errorf("expect non-empty error code") + } + if v := aerr.Code(); v == aws.ErrCodeSerialization { + t.Errorf("expect API error code got serialization failure") + } +} diff --git a/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go b/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go new file mode 100644 index 00000000000..a03bcbc6063 --- /dev/null +++ b/service/cloudsearch/api_op_DescribeDomainEndpointOptions.go @@ -0,0 +1,131 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package cloudsearch + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Container for the parameters to the DescribeDomainEndpointOptions operation. +// Specify the name of the domain you want to describe. To show the active configuration +// and exclude any pending changes, set the Deployed option to true. +type DescribeDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to retrieve the latest configuration (which might be in a Processing + // state) or the current, active configuration. Defaults to false. + Deployed *bool `type:"boolean"` + + // A string that represents the name of a domain. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDomainEndpointOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDomainEndpointOptionsInput"} + + if s.DomainName == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a DescribeDomainEndpointOptions request. Contains the status +// and configuration of a search domain's endpoint options. +type DescribeDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The status and configuration of a search domain's endpoint options. + DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s DescribeDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeDomainEndpointOptions = "DescribeDomainEndpointOptions" + +// DescribeDomainEndpointOptionsRequest returns a request value for making API operation for +// Amazon CloudSearch. +// +// Returns the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// // Example sending a request using DescribeDomainEndpointOptionsRequest. +// req := client.DescribeDomainEndpointOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) DescribeDomainEndpointOptionsRequest(input *DescribeDomainEndpointOptionsInput) DescribeDomainEndpointOptionsRequest { + op := &aws.Operation{ + Name: opDescribeDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDomainEndpointOptionsInput{} + } + + req := c.newRequest(op, input, &DescribeDomainEndpointOptionsOutput{}) + return DescribeDomainEndpointOptionsRequest{Request: req, Input: input, Copy: c.DescribeDomainEndpointOptionsRequest} +} + +// DescribeDomainEndpointOptionsRequest is the request type for the +// DescribeDomainEndpointOptions API operation. 
+type DescribeDomainEndpointOptionsRequest struct { + *aws.Request + Input *DescribeDomainEndpointOptionsInput + Copy func(*DescribeDomainEndpointOptionsInput) DescribeDomainEndpointOptionsRequest +} + +// Send marshals and sends the DescribeDomainEndpointOptions API request. +func (r DescribeDomainEndpointOptionsRequest) Send(ctx context.Context) (*DescribeDomainEndpointOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDomainEndpointOptionsResponse{ + DescribeDomainEndpointOptionsOutput: r.Request.Data.(*DescribeDomainEndpointOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDomainEndpointOptionsResponse is the response type for the +// DescribeDomainEndpointOptions API operation. +type DescribeDomainEndpointOptionsResponse struct { + *DescribeDomainEndpointOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDomainEndpointOptions request. +func (r *DescribeDomainEndpointOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go b/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go new file mode 100644 index 00000000000..2ffeb6afe72 --- /dev/null +++ b/service/cloudsearch/api_op_UpdateDomainEndpointOptions.go @@ -0,0 +1,138 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudsearch + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Container for the parameters to the UpdateDomainEndpointOptions operation. +// Specifies the name of the domain you want to update and the domain endpoint +// options. +type UpdateDomainEndpointOptionsInput struct { + _ struct{} `type:"structure"` + + // Whether to require that all requests to the domain arrive over HTTPS. We + // recommend Policy-Min-TLS-1-2-2019-07 for TLSSecurityPolicy. For compatibility + // with older clients, the default is Policy-Min-TLS-1-0-2019-07. + // + // DomainEndpointOptions is a required field + DomainEndpointOptions *DomainEndpointOptions `type:"structure" required:"true"` + + // A string that represents the name of a domain. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDomainEndpointOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDomainEndpointOptionsInput"} + + if s.DomainEndpointOptions == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainEndpointOptions")) + } + + if s.DomainName == nil { + invalidParams.Add(aws.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The result of a UpdateDomainEndpointOptions request. Contains the configuration +// and status of the domain's endpoint options. +type UpdateDomainEndpointOptionsOutput struct { + _ struct{} `type:"structure"` + + // The newly-configured domain endpoint options. 
+ DomainEndpointOptions *DomainEndpointOptionsStatus `type:"structure"` +} + +// String returns the string representation +func (s UpdateDomainEndpointOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateDomainEndpointOptions = "UpdateDomainEndpointOptions" + +// UpdateDomainEndpointOptionsRequest returns a request value for making API operation for +// Amazon CloudSearch. +// +// Updates the domain's endpoint options, specifically whether all requests +// to the domain must arrive over HTTPS. For more information, see Configuring +// Domain Endpoint Options (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-domain-endpoint-options.html) +// in the Amazon CloudSearch Developer Guide. +// +// // Example sending a request using UpdateDomainEndpointOptionsRequest. +// req := client.UpdateDomainEndpointOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) UpdateDomainEndpointOptionsRequest(input *UpdateDomainEndpointOptionsInput) UpdateDomainEndpointOptionsRequest { + op := &aws.Operation{ + Name: opUpdateDomainEndpointOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainEndpointOptionsInput{} + } + + req := c.newRequest(op, input, &UpdateDomainEndpointOptionsOutput{}) + return UpdateDomainEndpointOptionsRequest{Request: req, Input: input, Copy: c.UpdateDomainEndpointOptionsRequest} +} + +// UpdateDomainEndpointOptionsRequest is the request type for the +// UpdateDomainEndpointOptions API operation. +type UpdateDomainEndpointOptionsRequest struct { + *aws.Request + Input *UpdateDomainEndpointOptionsInput + Copy func(*UpdateDomainEndpointOptionsInput) UpdateDomainEndpointOptionsRequest +} + +// Send marshals and sends the UpdateDomainEndpointOptions API request. +func (r UpdateDomainEndpointOptionsRequest) Send(ctx context.Context) (*UpdateDomainEndpointOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDomainEndpointOptionsResponse{ + UpdateDomainEndpointOptionsOutput: r.Request.Data.(*UpdateDomainEndpointOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDomainEndpointOptionsResponse is the response type for the +// UpdateDomainEndpointOptions API operation. +type UpdateDomainEndpointOptionsResponse struct { + *UpdateDomainEndpointOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDomainEndpointOptions request. +func (r *UpdateDomainEndpointOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudsearch/api_types.go b/service/cloudsearch/api_types.go index b896e03f753..426447b5570 100644 --- a/service/cloudsearch/api_types.go +++ b/service/cloudsearch/api_types.go @@ -306,6 +306,42 @@ func (s *DocumentSuggesterOptions) Validate() error { return nil } +// The domain's endpoint options. +type DomainEndpointOptions struct { + _ struct{} `type:"structure"` + + // Whether the domain is HTTPS only enabled. + EnforceHTTPS *bool `type:"boolean"` + + // The minimum required TLS version + TLSSecurityPolicy TLSSecurityPolicy `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptions) String() string { + return awsutil.Prettify(s) +} + +// The configuration and status of the domain's endpoint options. 
+type DomainEndpointOptionsStatus struct { + _ struct{} `type:"structure"` + + // The domain endpoint options configured for the domain. + // + // Options is a required field + Options *DomainEndpointOptions `type:"structure" required:"true"` + + // The status of the configured domain endpoint options. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptionsStatus) String() string { + return awsutil.Prettify(s) +} + // The current status of the search domain. type DomainStatus struct { _ struct{} `type:"structure"` diff --git a/service/cloudsearch/cloudsearchiface/interface.go b/service/cloudsearch/cloudsearchiface/interface.go index 9d8cc661608..e3f81be3101 100644 --- a/service/cloudsearch/cloudsearchiface/interface.go +++ b/service/cloudsearch/cloudsearchiface/interface.go @@ -87,6 +87,8 @@ type ClientAPI interface { DescribeAvailabilityOptionsRequest(*cloudsearch.DescribeAvailabilityOptionsInput) cloudsearch.DescribeAvailabilityOptionsRequest + DescribeDomainEndpointOptionsRequest(*cloudsearch.DescribeDomainEndpointOptionsInput) cloudsearch.DescribeDomainEndpointOptionsRequest + DescribeDomainsRequest(*cloudsearch.DescribeDomainsInput) cloudsearch.DescribeDomainsRequest DescribeExpressionsRequest(*cloudsearch.DescribeExpressionsInput) cloudsearch.DescribeExpressionsRequest @@ -105,6 +107,8 @@ type ClientAPI interface { UpdateAvailabilityOptionsRequest(*cloudsearch.UpdateAvailabilityOptionsInput) cloudsearch.UpdateAvailabilityOptionsRequest + UpdateDomainEndpointOptionsRequest(*cloudsearch.UpdateDomainEndpointOptionsInput) cloudsearch.UpdateDomainEndpointOptionsRequest + UpdateScalingParametersRequest(*cloudsearch.UpdateScalingParametersInput) cloudsearch.UpdateScalingParametersRequest UpdateServiceAccessPoliciesRequest(*cloudsearch.UpdateServiceAccessPoliciesInput) cloudsearch.UpdateServiceAccessPoliciesRequest diff --git a/service/cloudtrail/api_enums.go b/service/cloudtrail/api_enums.go index 833e547adc0..55b92bdb589 100644 --- a/service/cloudtrail/api_enums.go +++ b/service/cloudtrail/api_enums.go @@ -2,6 +2,38 @@ package cloudtrail +type EventCategory string + +// Enum values for EventCategory +const ( + EventCategoryInsight EventCategory = "insight" +) + +func (enum EventCategory) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EventCategory) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type InsightType string + +// Enum values for InsightType +const ( + InsightTypeApiCallRateInsight InsightType = "ApiCallRateInsight" +) + +func (enum InsightType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum InsightType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type LookupAttributeKey string // Enum values for LookupAttributeKey diff --git a/service/cloudtrail/api_errors.go b/service/cloudtrail/api_errors.go index 837329e9cce..bc55c48bb89 100644 --- a/service/cloudtrail/api_errors.go +++ b/service/cloudtrail/api_errors.go @@ -28,6 +28,13 @@ const ( // Cannot set a CloudWatch Logs delivery for this region. ErrCodeCloudWatchLogsDeliveryUnavailableException = "CloudWatchLogsDeliveryUnavailableException" + // ErrCodeInsightNotEnabledException for service response error code + // "InsightNotEnabledException". 
+ // + // If you run GetInsightSelectors on a trail that does not have Insights events + // enabled, the operation throws the exception InsightNotEnabledException. + ErrCodeInsightNotEnabledException = "InsightNotEnabledException" + // ErrCodeInsufficientDependencyServiceAccessPermissionException for service response error code // "InsufficientDependencyServiceAccessPermissionException". // @@ -68,6 +75,13 @@ const ( // This exception is thrown when the provided role is not valid. ErrCodeInvalidCloudWatchLogsRoleArnException = "InvalidCloudWatchLogsRoleArnException" + // ErrCodeInvalidEventCategoryException for service response error code + // "InvalidEventCategoryException". + // + // Occurs if an event category that is not valid is specified as a value of + // EventCategory. + ErrCodeInvalidEventCategoryException = "InvalidEventCategoryException" + // ErrCodeInvalidEventSelectorsException for service response error code // "InvalidEventSelectorsException". // @@ -99,6 +113,14 @@ const ( // other than the region in which the trail was created. ErrCodeInvalidHomeRegionException = "InvalidHomeRegionException" + // ErrCodeInvalidInsightSelectorsException for service response error code + // "InvalidInsightSelectorsException". + // + // The formatting or syntax of the InsightSelectors JSON statement in your PutInsightSelectors + // or GetInsightSelectors request is not valid, or the specified insight type + // in the InsightSelectors statement is not a valid insight type. + ErrCodeInvalidInsightSelectorsException = "InvalidInsightSelectorsException" + // ErrCodeInvalidKmsKeyIdException for service response error code // "InvalidKmsKeyIdException". // diff --git a/service/cloudtrail/api_op_DescribeTrails.go b/service/cloudtrail/api_op_DescribeTrails.go index 15c3bb300f8..a983922945c 100644 --- a/service/cloudtrail/api_op_DescribeTrails.go +++ b/service/cloudtrail/api_op_DescribeTrails.go @@ -53,7 +53,11 @@ func (s DescribeTrailsInput) String() string { type DescribeTrailsOutput struct { _ struct{} `type:"structure"` - // The list of trail objects. + // The list of trail objects. Trail objects with string values are only returned + // if values for the objects exist in a trail's configuration. For example, + // SNSTopicName and SNSTopicARN are only returned in results if a trail is configured + // to send SNS notifications. Similarly, KMSKeyId only appears in results if + // a trail's log files are encrypted with AWS KMS-managed keys. TrailList []Trail `locationName:"trailList" type:"list"` } diff --git a/service/cloudtrail/api_op_GetInsightSelectors.go b/service/cloudtrail/api_op_GetInsightSelectors.go new file mode 100644 index 00000000000..b66745319d2 --- /dev/null +++ b/service/cloudtrail/api_op_GetInsightSelectors.go @@ -0,0 +1,147 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudtrail + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetInsightSelectorsInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the trail or trail ARN. If you specify a trail name, + // the string must meet the following requirements: + // + // * Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-) + // + // * Start with a letter or number, and end with a letter or number + // + // * Be between 3 and 128 characters + // + // * Have no adjacent periods, underscores or dashes. 
Names like my-_namespace + // and my--namespace are not valid. + // + // * Not be in IP address format (for example, 192.168.5.4) + // + // If you specify a trail ARN, it must be in the format: + // + // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // + // TrailName is a required field + TrailName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInsightSelectorsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInsightSelectorsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetInsightSelectorsInput"} + + if s.TrailName == nil { + invalidParams.Add(aws.NewErrParamRequired("TrailName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetInsightSelectorsOutput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + InsightSelectors []InsightSelector `type:"list"` + + // The Amazon Resource Name (ARN) of a trail for which you want to get Insights + // selectors. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s GetInsightSelectorsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetInsightSelectors = "GetInsightSelectors" + +// GetInsightSelectorsRequest returns a request value for making API operation for +// AWS CloudTrail. +// +// Describes the settings for the Insights event selectors that you configured +// for your trail. GetInsightSelectors shows if CloudTrail Insights event logging +// is enabled on the trail, and if it is, which insight types are enabled. If +// you run GetInsightSelectors on a trail that does not have Insights events +// enabled, the operation throws the exception InsightNotEnabledException +// +// For more information, see Logging CloudTrail Insights Events for Trails (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-insights-events-with-cloudtrail.html) +// in the AWS CloudTrail User Guide. +// +// // Example sending a request using GetInsightSelectorsRequest. +// req := client.GetInsightSelectorsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/GetInsightSelectors +func (c *Client) GetInsightSelectorsRequest(input *GetInsightSelectorsInput) GetInsightSelectorsRequest { + op := &aws.Operation{ + Name: opGetInsightSelectors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInsightSelectorsInput{} + } + + req := c.newRequest(op, input, &GetInsightSelectorsOutput{}) + return GetInsightSelectorsRequest{Request: req, Input: input, Copy: c.GetInsightSelectorsRequest} +} + +// GetInsightSelectorsRequest is the request type for the +// GetInsightSelectors API operation. +type GetInsightSelectorsRequest struct { + *aws.Request + Input *GetInsightSelectorsInput + Copy func(*GetInsightSelectorsInput) GetInsightSelectorsRequest +} + +// Send marshals and sends the GetInsightSelectors API request. 
+func (r GetInsightSelectorsRequest) Send(ctx context.Context) (*GetInsightSelectorsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetInsightSelectorsResponse{ + GetInsightSelectorsOutput: r.Request.Data.(*GetInsightSelectorsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetInsightSelectorsResponse is the response type for the +// GetInsightSelectors API operation. +type GetInsightSelectorsResponse struct { + *GetInsightSelectorsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetInsightSelectors request. +func (r *GetInsightSelectorsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudtrail/api_op_ListTrails.go b/service/cloudtrail/api_op_ListTrails.go index 18f086d8bd5..e2050c90e01 100644 --- a/service/cloudtrail/api_op_ListTrails.go +++ b/service/cloudtrail/api_op_ListTrails.go @@ -12,6 +12,11 @@ import ( type ListTrailsInput struct { _ struct{} `type:"structure"` + // The token to use to get the next page of results after a previous API call. + // This token must be passed in with the same parameters that were specified + // in the the original call. For example, if the original call specified an + // AttributeKey of 'Username' with a value of 'root', the call with NextToken + // should include those same parameters. NextToken *string `type:"string"` } @@ -23,6 +28,11 @@ func (s ListTrailsInput) String() string { type ListTrailsOutput struct { _ struct{} `type:"structure"` + // The token to use to get the next page of results after a previous API call. + // If the token does not appear, there are no more results to return. The token + // must be passed in with the same parameters as the previous call. For example, + // if the original call specified an AttributeKey of 'Username' with a value + // of 'root', the call with NextToken should include those same parameters. NextToken *string `type:"string"` // Returns the name, ARN, and home region of trails in the current account. diff --git a/service/cloudtrail/api_op_LookupEvents.go b/service/cloudtrail/api_op_LookupEvents.go index 17447047155..7008be0059b 100644 --- a/service/cloudtrail/api_op_LookupEvents.go +++ b/service/cloudtrail/api_op_LookupEvents.go @@ -20,6 +20,12 @@ type LookupEventsInput struct { // error is returned. EndTime *time.Time `type:"timestamp"` + // Specifies the event category. If you do not specify an event category, events + // of the category are not returned in the response. For example, if you do + // not specify insight as the value of EventCategory, no Insights events are + // returned. + EventCategory EventCategory `type:"string" enum:"true"` + // Contains a list of lookup attributes. Currently the list can contain only // one item. LookupAttributes []LookupAttribute `type:"list"` @@ -94,8 +100,10 @@ const opLookupEvents = "LookupEvents" // AWS CloudTrail. // // Looks up management events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events) -// captured by CloudTrail. You can look up events that occurred in a region -// within the last 90 days. Lookup supports the following attributes: +// or CloudTrail Insights events (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-insights-events) +// that are captured by CloudTrail. 
You can look up events that occurred in +// a region within the last 90 days. Lookup supports the following attributes +// for management events: // // * AWS access key // @@ -113,16 +121,21 @@ const opLookupEvents = "LookupEvents" // // * User name // +// Lookup supports the following attributes for Insights events: +// +// * Event ID +// +// * Event name +// +// * Event source +// // All attributes are optional. The default number of results returned is 50, // with a maximum of 50 possible. The response includes a token that you can // use to get the next page of results. // -// The rate of lookup requests is limited to one per second per account. If +// The rate of lookup requests is limited to two per second per account. If // this limit is exceeded, a throttling error occurs. // -// Events that occurred during the selected time range will not be available -// for lookup if CloudTrail logging was not enabled when the events occurred. -// // // Example sending a request using LookupEventsRequest. // req := client.LookupEventsRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cloudtrail/api_op_PutInsightSelectors.go b/service/cloudtrail/api_op_PutInsightSelectors.go new file mode 100644 index 00000000000..659f69f3045 --- /dev/null +++ b/service/cloudtrail/api_op_PutInsightSelectors.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package cloudtrail + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type PutInsightSelectorsInput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + // + // InsightSelectors is a required field + InsightSelectors []InsightSelector `type:"list" required:"true"` + + // The name of the CloudTrail trail for which you want to change or add Insights + // selectors. + // + // TrailName is a required field + TrailName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutInsightSelectorsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutInsightSelectorsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutInsightSelectorsInput"} + + if s.InsightSelectors == nil { + invalidParams.Add(aws.NewErrParamRequired("InsightSelectors")) + } + + if s.TrailName == nil { + invalidParams.Add(aws.NewErrParamRequired("TrailName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutInsightSelectorsOutput struct { + _ struct{} `type:"structure"` + + // A JSON string that contains the insight types you want to log on a trail. + // In this release, only ApiCallRateInsight is supported as an insight type. + InsightSelectors []InsightSelector `type:"list"` + + // The Amazon Resource Name (ARN) of a trail for which you want to change or + // add Insights selectors. + TrailARN *string `type:"string"` +} + +// String returns the string representation +func (s PutInsightSelectorsOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutInsightSelectors = "PutInsightSelectors" + +// PutInsightSelectorsRequest returns a request value for making API operation for +// AWS CloudTrail. 
+// +// Lets you enable Insights event logging by specifying the Insights selectors +// that you want to enable on an existing trail. You also use PutInsightSelectors +// to turn off Insights event logging, by passing an empty list of insight types. +// In this release, only ApiCallRateInsight is supported as an Insights selector. +// +// // Example sending a request using PutInsightSelectorsRequest. +// req := client.PutInsightSelectorsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/cloudtrail-2013-11-01/PutInsightSelectors +func (c *Client) PutInsightSelectorsRequest(input *PutInsightSelectorsInput) PutInsightSelectorsRequest { + op := &aws.Operation{ + Name: opPutInsightSelectors, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutInsightSelectorsInput{} + } + + req := c.newRequest(op, input, &PutInsightSelectorsOutput{}) + return PutInsightSelectorsRequest{Request: req, Input: input, Copy: c.PutInsightSelectorsRequest} +} + +// PutInsightSelectorsRequest is the request type for the +// PutInsightSelectors API operation. +type PutInsightSelectorsRequest struct { + *aws.Request + Input *PutInsightSelectorsInput + Copy func(*PutInsightSelectorsInput) PutInsightSelectorsRequest +} + +// Send marshals and sends the PutInsightSelectors API request. +func (r PutInsightSelectorsRequest) Send(ctx context.Context) (*PutInsightSelectorsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutInsightSelectorsResponse{ + PutInsightSelectorsOutput: r.Request.Data.(*PutInsightSelectorsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutInsightSelectorsResponse is the response type for the +// PutInsightSelectors API operation. +type PutInsightSelectorsResponse struct { + *PutInsightSelectorsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutInsightSelectors request. +func (r *PutInsightSelectorsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/cloudtrail/api_types.go b/service/cloudtrail/api_types.go index fe1b97ec65b..569a1bca112 100644 --- a/service/cloudtrail/api_types.go +++ b/service/cloudtrail/api_types.go @@ -13,8 +13,8 @@ var _ aws.Config var _ = awsutil.Prettify // The Amazon S3 buckets or AWS Lambda functions that you specify in your event -// selectors for your trail to log data events. Data events provide insight -// into the resource operations performed on or within a resource itself. These +// selectors for your trail to log data events. Data events provide information +// about the resource operations performed on or within a resource itself. These // are also known as data plane operations. You can specify up to 250 data resources // for a trail. // @@ -162,6 +162,14 @@ type EventSelector struct { // in the AWS CloudTrail User Guide. DataResources []DataResource `type:"list"` + // An optional list of service event sources from which you do not want management + // events to be logged on your trail. In this release, the list can be empty + // (disables the filter), or it can filter out AWS Key Management Service events + // by containing "kms.amazonaws.com". By default, ExcludeManagementEventSources + // is empty, and AWS KMS events are included in events that are logged to your + // trail. 
+ ExcludeManagementEventSources []string `type:"list"` + // Specify if you want your event selector to include management events for // your trail. // @@ -184,6 +192,21 @@ func (s EventSelector) String() string { return awsutil.Prettify(s) } +// A JSON string that contains a list of insight types that are logged on a +// trail. +type InsightSelector struct { + _ struct{} `type:"structure"` + + // The type of insights to log on a trail. In this release, only ApiCallRateInsight + // is supported as an insight type. + InsightType InsightType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s InsightSelector) String() string { + return awsutil.Prettify(s) +} + // Specifies an attribute and value that filter the events returned. type LookupAttribute struct { _ struct{} `type:"structure"` @@ -333,6 +356,10 @@ type Trail struct { // Specifies if the trail has custom event selectors. HasCustomEventSelectors *bool `type:"boolean"` + // Specifies whether a trail has insight types specified in an InsightSelector + // list. + HasInsightSelectors *bool `type:"boolean"` + // The region in which the trail was created. HomeRegion *string `type:"string"` diff --git a/service/cloudtrail/cloudtrailiface/interface.go b/service/cloudtrail/cloudtrailiface/interface.go index c913fe19782..809f72aee13 100644 --- a/service/cloudtrail/cloudtrailiface/interface.go +++ b/service/cloudtrail/cloudtrailiface/interface.go @@ -71,6 +71,8 @@ type ClientAPI interface { GetEventSelectorsRequest(*cloudtrail.GetEventSelectorsInput) cloudtrail.GetEventSelectorsRequest + GetInsightSelectorsRequest(*cloudtrail.GetInsightSelectorsInput) cloudtrail.GetInsightSelectorsRequest + GetTrailRequest(*cloudtrail.GetTrailInput) cloudtrail.GetTrailRequest GetTrailStatusRequest(*cloudtrail.GetTrailStatusInput) cloudtrail.GetTrailStatusRequest @@ -85,6 +87,8 @@ type ClientAPI interface { PutEventSelectorsRequest(*cloudtrail.PutEventSelectorsInput) cloudtrail.PutEventSelectorsRequest + PutInsightSelectorsRequest(*cloudtrail.PutInsightSelectorsInput) cloudtrail.PutInsightSelectorsRequest + RemoveTagsRequest(*cloudtrail.RemoveTagsInput) cloudtrail.RemoveTagsRequest StartLoggingRequest(*cloudtrail.StartLoggingInput) cloudtrail.StartLoggingRequest diff --git a/service/cloudwatchlogs/api_op_CreateLogGroup.go b/service/cloudwatchlogs/api_op_CreateLogGroup.go index a5b891cdf23..c9ae1eb85cb 100644 --- a/service/cloudwatchlogs/api_op_CreateLogGroup.go +++ b/service/cloudwatchlogs/api_op_CreateLogGroup.go @@ -69,7 +69,7 @@ const opCreateLogGroup = "CreateLogGroup" // // Creates a log group with the specified name. // -// You can create up to 5000 log groups per account. +// You can create up to 20,000 log groups per account. // // You must use the following guidelines when naming a log group: // @@ -78,7 +78,8 @@ const opCreateLogGroup = "CreateLogGroup" // * Log group names can be between 1 and 512 characters long. // // * Log group names consist of the following characters: a-z, A-Z, 0-9, -// '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). +// '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and +// '#' (number sign) // // If you associate a AWS Key Management Service (AWS KMS) customer master key // (CMK) with the log group, ingested data is encrypted using the CMK. 
This diff --git a/service/cloudwatchlogs/api_op_GetLogEvents.go b/service/cloudwatchlogs/api_op_GetLogEvents.go index 3367738de72..2fb020e6ab9 100644 --- a/service/cloudwatchlogs/api_op_GetLogEvents.go +++ b/service/cloudwatchlogs/api_op_GetLogEvents.go @@ -34,6 +34,8 @@ type GetLogEventsInput struct { // The token for the next set of items to return. (You received this token from // a previous call.) + // + // Using this token works only when you specify true for startFromHead. NextToken *string `locationName:"nextToken" min:"1" type:"string"` // If the value is true, the earliest log events are returned first. If the diff --git a/service/cloudwatchlogs/api_op_PutDestination.go b/service/cloudwatchlogs/api_op_PutDestination.go index 020dcc44937..d3f12f578e6 100644 --- a/service/cloudwatchlogs/api_op_PutDestination.go +++ b/service/cloudwatchlogs/api_op_PutDestination.go @@ -82,11 +82,12 @@ const opPutDestination = "PutDestination" // PutDestinationRequest returns a request value for making API operation for // Amazon CloudWatch Logs. // -// Creates or updates a destination. A destination encapsulates a physical resource -// (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time -// stream of log events for a different account, ingested using PutLogEvents. -// A destination can be an Amazon Kinesis stream, Amazon Kinesis Data Firehose -// strea, or an AWS Lambda function. +// Creates or updates a destination. This operation is used only to create destinations +// for cross-account subscriptions. +// +// A destination encapsulates a physical resource (such as an Amazon Kinesis +// stream) and enables you to subscribe to a real-time stream of log events +// for a different account, ingested using PutLogEvents. // // Through an access policy, a destination controls what is written to it. By // default, PutDestination does not set any access policy with the destination, diff --git a/service/cloudwatchlogs/api_op_StartQuery.go b/service/cloudwatchlogs/api_op_StartQuery.go index 41b966a3e56..e582de48625 100644 --- a/service/cloudwatchlogs/api_op_StartQuery.go +++ b/service/cloudwatchlogs/api_op_StartQuery.go @@ -21,6 +21,7 @@ type StartQueryInput struct { // The maximum number of log events to return in the query. If the query string // uses the fields command, only the specified fields and their values are returned. + // The default is 1000. Limit *int64 `locationName:"limit" min:"1" type:"integer"` // The log group on which to perform the query. diff --git a/service/cloudwatchlogs/api_types.go b/service/cloudwatchlogs/api_types.go index 0465be0b7ec..864332282dc 100644 --- a/service/cloudwatchlogs/api_types.go +++ b/service/cloudwatchlogs/api_types.go @@ -267,9 +267,9 @@ type LogStream struct { // The number of bytes stored. // - // IMPORTANT: Starting on June 17, 2019, this parameter will be deprecated for - // log streams, and will be reported as zero. This change applies only to log - // streams. The storedBytes parameter for log groups is not affected. + // IMPORTANT:On June 17, 2019, this parameter was deprecated for log streams, + // and is always reported as zero. This change applies only to log streams. + // The storedBytes parameter for log groups is not affected. StoredBytes *int64 `locationName:"storedBytes" deprecated:"true" type:"long"` // The sequence token. 
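The GetLogEvents change above notes that the returned NextToken is only honored when startFromHead is true. Below is a minimal sketch of forward pagination that respects that constraint; it is not part of the generated diff, the log group and stream names are placeholders, and it assumes the `external` default-config loader shipped with this SDK line.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config: %v", err)
	}
	svc := cloudwatchlogs.New(cfg)

	var token *string
	for {
		// StartFromHead must be true for NextToken-based forward paging to work,
		// per the GetLogEventsInput documentation above.
		req := svc.GetLogEventsRequest(&cloudwatchlogs.GetLogEventsInput{
			LogGroupName:  aws.String("my-log-group"),  // placeholder
			LogStreamName: aws.String("my-log-stream"), // placeholder
			StartFromHead: aws.Bool(true),
			NextToken:     token,
		})
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatalf("GetLogEvents failed: %v", err)
		}
		for _, e := range resp.Events {
			if e.Message != nil {
				fmt.Println(*e.Message)
			}
		}
		// The service signals the end of the stream by returning the same
		// forward token that was passed in.
		if token != nil && resp.NextForwardToken != nil && *resp.NextForwardToken == *token {
			break
		}
		token = resp.NextForwardToken
	}
}
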
diff --git a/service/codebuild/api_doc.go b/service/codebuild/api_doc.go index 053accb8d46..b913de1bbfe 100644 --- a/service/codebuild/api_doc.go +++ b/service/codebuild/api_doc.go @@ -18,6 +18,8 @@ // // * BatchDeleteBuilds: Deletes one or more builds. // +// * BatchGetBuilds: Gets information about one or more builds. +// // * BatchGetProjects: Gets information about one or more build projects. // A build project defines how AWS CodeBuild runs a build. This includes // information such as where to get the source code to build, the build environment @@ -33,21 +35,21 @@ // CodeBuild to start rebuilding the source code every time a code change // is pushed to the repository. // -// * UpdateWebhook: Changes the settings of an existing webhook. -// // * DeleteProject: Deletes a build project. // +// * DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, +// or Bitbucket source credentials. +// // * DeleteWebhook: For an existing AWS CodeBuild build project that has // its source code stored in a GitHub or Bitbucket repository, stops AWS // CodeBuild from rebuilding the source code every time a code change is // pushed to the repository. // -// * ListProjects: Gets a list of build project names, with each build project -// name representing a single build project. -// -// * UpdateProject: Changes the settings of an existing build project. +// * ImportSourceCredentials: Imports the source repository credentials for +// an AWS CodeBuild project that has its source code stored in a GitHub, +// GitHub Enterprise, or Bitbucket repository. // -// * BatchGetBuilds: Gets information about one or more builds. +// * InvalidateProjectCache: Resets the cache for a project. // // * ListBuilds: Gets a list of build IDs, with each build ID representing // a single build. @@ -55,24 +57,24 @@ // * ListBuildsForProject: Gets a list of build IDs for the specified build // project, with each build ID representing a single build. // -// * StartBuild: Starts running a build. -// -// * StopBuild: Attempts to stop running a build. -// // * ListCuratedEnvironmentImages: Gets information about Docker images that // are managed by AWS CodeBuild. // -// * DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, -// or Bitbucket source credentials. -// -// * ImportSourceCredentials: Imports the source repository credentials for -// an AWS CodeBuild project that has its source code stored in a GitHub, -// GitHub Enterprise, or Bitbucket repository. +// * ListProjects: Gets a list of build project names, with each build project +// name representing a single build project. // // * ListSourceCredentials: Returns a list of SourceCredentialsInfo objects. // Each SourceCredentialsInfo object includes the authentication type, token // ARN, and type of source provider for one set of credentials. // +// * StartBuild: Starts running a build. +// +// * StopBuild: Attempts to stop running a build. +// +// * UpdateProject: Changes the settings of an existing build project. +// +// * UpdateWebhook: Changes the settings of an existing webhook. +// // See https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06 for more information on this service. // // See codebuild package documentation for more information. 
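Two of the operations enumerated in the api_doc.go summary above compose naturally: ListBuildsForProject returns build IDs, and BatchGetBuilds expands those IDs into full build details. The following is a minimal sketch, not part of the generated diff; the project name is a placeholder and the default-config loader is an assumption.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codebuild"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config: %v", err)
	}
	svc := codebuild.New(cfg)
	ctx := context.TODO()

	// ListBuildsForProject: build IDs for one project (first page only, for brevity).
	listResp, err := svc.ListBuildsForProjectRequest(&codebuild.ListBuildsForProjectInput{
		ProjectName: aws.String("my-project"), // placeholder
	}).Send(ctx)
	if err != nil {
		log.Fatalf("ListBuildsForProject failed: %v", err)
	}
	if len(listResp.Ids) == 0 {
		fmt.Println("no builds found")
		return
	}

	// BatchGetBuilds: details for the listed builds in a single call.
	getResp, err := svc.BatchGetBuildsRequest(&codebuild.BatchGetBuildsInput{
		Ids: listResp.Ids,
	}).Send(ctx)
	if err != nil {
		log.Fatalf("BatchGetBuilds failed: %v", err)
	}
	for _, b := range getResp.Builds {
		if b.Id != nil {
			fmt.Printf("%s: %s\n", *b.Id, b.BuildStatus)
		}
	}
}
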
diff --git a/service/codebuild/api_enums.go b/service/codebuild/api_enums.go index 049653f0841..2d643b080d5 100644 --- a/service/codebuild/api_enums.go +++ b/service/codebuild/api_enums.go @@ -138,9 +138,10 @@ type ComputeType string // Enum values for ComputeType const ( - ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" - ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" - ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" + ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" + ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral12xlarge ComputeType = "BUILD_GENERAL1_2XLARGE" ) func (enum ComputeType) MarshalValue() (string, error) { @@ -172,8 +173,10 @@ type EnvironmentType string // Enum values for EnvironmentType const ( - EnvironmentTypeWindowsContainer EnvironmentType = "WINDOWS_CONTAINER" - EnvironmentTypeLinuxContainer EnvironmentType = "LINUX_CONTAINER" + EnvironmentTypeWindowsContainer EnvironmentType = "WINDOWS_CONTAINER" + EnvironmentTypeLinuxContainer EnvironmentType = "LINUX_CONTAINER" + EnvironmentTypeLinuxGpuContainer EnvironmentType = "LINUX_GPU_CONTAINER" + EnvironmentTypeArmContainer EnvironmentType = "ARM_CONTAINER" ) func (enum EnvironmentType) MarshalValue() (string, error) { diff --git a/service/codebuild/api_op_BatchGetBuilds.go b/service/codebuild/api_op_BatchGetBuilds.go index 81f0c032437..a52dddd32d0 100644 --- a/service/codebuild/api_op_BatchGetBuilds.go +++ b/service/codebuild/api_op_BatchGetBuilds.go @@ -60,7 +60,7 @@ const opBatchGetBuilds = "BatchGetBuilds" // BatchGetBuildsRequest returns a request value for making API operation for // AWS CodeBuild. // -// Gets information about builds. +// Gets information about one or more builds. // // // Example sending a request using BatchGetBuildsRequest. // req := client.BatchGetBuildsRequest(params) diff --git a/service/codebuild/api_op_BatchGetProjects.go b/service/codebuild/api_op_BatchGetProjects.go index d16bc58defe..3e01210bfcc 100644 --- a/service/codebuild/api_op_BatchGetProjects.go +++ b/service/codebuild/api_op_BatchGetProjects.go @@ -60,7 +60,7 @@ const opBatchGetProjects = "BatchGetProjects" // BatchGetProjectsRequest returns a request value for making API operation for // AWS CodeBuild. // -// Gets information about build projects. +// Gets information about one or more build projects. // // // Example sending a request using BatchGetProjectsRequest. // req := client.BatchGetProjectsRequest(params) diff --git a/service/codebuild/api_types.go b/service/codebuild/api_types.go index 33faae8eaad..9d546487228 100644 --- a/service/codebuild/api_types.go +++ b/service/codebuild/api_types.go @@ -966,7 +966,23 @@ type ProjectEnvironment struct { // // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. // - // * BUILD_GENERAL1_LARGE: Use up to 15 GB memory and 8 vCPUs for builds. + // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, + // depending on your environment type. + // + // * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB + // of SSD storage for builds. This compute type supports Docker images up + // to 100 GB uncompressed. + // + // If you use BUILD_GENERAL1_LARGE: + // + // * For environment type LINUX_CONTAINER, you can use up to 15 GB memory + // and 8 vCPUs for builds. 
+ // + // * For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, + // 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. + // + // * For environment type ARM_CONTAINER, you can use up to 16 GB memory and + // 8 vCPUs on ARM-based processors for builds. // // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) // in the AWS CodeBuild User Guide. @@ -1036,6 +1052,22 @@ type ProjectEnvironment struct { // The type of build environment to use for related builds. // + // * The environment type ARM_CONTAINER is available only in regions US East + // (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific + // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt). + // + // * The environment type LINUX_CONTAINER with compute type build.general1.2xlarge + // is available only in regions US East (N. Virginia), US East (N. Virginia), + // US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), + // Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), + // Asia Pacific (Sydney), China (Beijing), and China (Ningxia). + // + // * The environment type LINUX_GPU_CONTAINER is available only in regions + // US East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada + // (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), + // Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) + // , China (Beijing), and China (Ningxia). + // // Type is a required field Type EnvironmentType `locationName:"type" type:"string" required:"true" enum:"true"` } diff --git a/service/codecommit/api_doc.go b/service/codecommit/api_doc.go index a0b88e6f94e..b020c00c83b 100644 --- a/service/codecommit/api_doc.go +++ b/service/codecommit/api_doc.go @@ -27,12 +27,12 @@ // the repository. // // * UpdateRepositoryName, which changes the name of the repository. If you -// change the name of a repository, no other users of that repository will -// be able to access it until you send them the new HTTPS or SSH URL to use. +// change the name of a repository, no other users of that repository can +// access it until you send them the new HTTPS or SSH URL to use. // // Branches, by calling the following: // -// * CreateBranch, which creates a new branch in a specified repository. +// * CreateBranch, which creates a branch in a specified repository. // // * DeleteBranch, which deletes the specified branch in a repository unless // it is the default branch. @@ -49,7 +49,7 @@ // branch. // // * GetBlob, which returns the base-64 encoded content of an individual -// Git blob object within a repository. +// Git blob object in a repository. // // * GetFile, which returns the base-64 encoded content of a specified file. // @@ -61,7 +61,7 @@ // Commits, by calling the following: // // * BatchGetCommits, which returns information about one or more commits -// in a repository +// in a repository. // // * CreateCommit, which creates a commit for changes to a repository. // @@ -69,7 +69,7 @@ // messages and author and committer information. // // * GetDifferences, which returns information about the differences in a -// valid commit specifier (such as a branch, tag, HEAD, commit ID or other +// valid commit specifier (such as a branch, tag, HEAD, commit ID, or other // fully qualified reference). 
// // Merges, by calling the following: @@ -107,14 +107,31 @@ // // * CreatePullRequest, which creates a pull request in a specified repository. // +// * CreatePullRequestApprovalRule, which creates an approval rule for a +// specified pull request. +// +// * DeletePullRequestApprovalRule, which deletes an approval rule for a +// specified pull request. +// // * DescribePullRequestEvents, which returns information about one or more // pull request events. // +// * EvaluatePullRequestApprovalRules, which evaluates whether a pull request +// has met all the conditions specified in its associated approval rules. +// // * GetCommentsForPullRequest, which returns information about comments // on a specified pull request. // // * GetPullRequest, which returns information about a specified pull request. // +// * GetPullRequestApprovalStates, which returns information about the approval +// states for a specified pull request. +// +// * GetPullRequestOverrideState, which returns information about whether +// approval rules have been set aside (overridden) for a pull request, and +// if so, the Amazon Resource Name (ARN) of the user or identity that overrode +// the rules and their requirements for the pull request. +// // * ListPullRequests, which lists all pull requests for a repository. // // * MergePullRequestByFastForward, which merges the source destination branch @@ -129,9 +146,18 @@ // of a pull request into the specified destination branch for that pull // request using the three-way merge option. // +// * OverridePullRequestApprovalRules, which sets aside all approval rule +// requirements for a pull request. +// // * PostCommentForPullRequest, which posts a comment to a pull request at // the specified line, file, or request. // +// * UpdatePullRequestApprovalRuleContent, which updates the structure of +// an approval rule for a pull request. +// +// * UpdatePullRequestApprovalState, which updates the state of an approval +// on a pull request. +// // * UpdatePullRequestDescription, which updates the description of a pull // request. // @@ -139,6 +165,58 @@ // // * UpdatePullRequestTitle, which updates the title of a pull request. // +// Approval rule templates, by calling the following: +// +// * AssociateApprovalRuleTemplateWithRepository, which associates a template +// with a specified repository. After the template is associated with a repository, +// AWS CodeCommit creates approval rules that match the template conditions +// on every pull request created in the specified repository. +// +// * BatchAssociateApprovalRuleTemplateWithRepositories, which associates +// a template with one or more specified repositories. After the template +// is associated with a repository, AWS CodeCommit creates approval rules +// that match the template conditions on every pull request created in the +// specified repositories. +// +// * BatchDisassociateApprovalRuleTemplateFromRepositories, which removes +// the association between a template and specified repositories so that +// approval rules based on the template are not automatically created when +// pull requests are created in those repositories. +// +// * CreateApprovalRuleTemplate, which creates a template for approval rules +// that can then be associated with one or more repositories in your AWS +// account. +// +// * DeleteApprovalRuleTemplate, which deletes the specified template. It +// does not remove approval rules on pull requests already created with the +// template.
+// +// * DisassociateApprovalRuleTemplateFromRepository, which removes the association +// between a template and a repository so that approval rules based on the +// template are not automatically created when pull requests are created +// in the specified repository. +// +// * GetApprovalRuleTemplate, which returns information about an approval +// rule template. +// +// * ListApprovalRuleTemplates, which lists all approval rule templates in +// the AWS Region in your AWS account. +// +// * ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval +// rule templates that are associated with a specified repository. +// +// * ListRepositoriesForApprovalRuleTemplate, which lists all repositories +// associated with the specified approval rule template. +// +// * UpdateApprovalRuleTemplateDescription, which updates the description +// of an approval rule template. +// +// * UpdateApprovalRuleTemplateName, which updates the name of an approval +// rule template. +// +// * UpdateApprovalRuleTemplateContent, which updates the content of an approval +// rule template. +// // Comments in a repository, by calling the following: // // * DeleteCommentContent, which deletes the content of a comment on a commit diff --git a/service/codecommit/api_enums.go b/service/codecommit/api_enums.go index 6d1cc61163d..9ea358acfec 100644 --- a/service/codecommit/api_enums.go +++ b/service/codecommit/api_enums.go @@ -2,6 +2,23 @@ package codecommit +type ApprovalState string + +// Enum values for ApprovalState +const ( + ApprovalStateApprove ApprovalState = "APPROVE" + ApprovalStateRevoke ApprovalState = "REVOKE" +) + +func (enum ApprovalState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ApprovalState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ChangeTypeEnum string // Enum values for ChangeTypeEnum @@ -128,6 +145,23 @@ func (enum OrderEnum) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type OverrideStatus string + +// Enum values for OverrideStatus +const ( + OverrideStatusOverride OverrideStatus = "OVERRIDE" + OverrideStatusRevoke OverrideStatus = "REVOKE" +) + +func (enum OverrideStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OverrideStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type PullRequestEventType string // Enum values for PullRequestEventType @@ -136,6 +170,11 @@ const ( PullRequestEventTypePullRequestStatusChanged PullRequestEventType = "PULL_REQUEST_STATUS_CHANGED" PullRequestEventTypePullRequestSourceReferenceUpdated PullRequestEventType = "PULL_REQUEST_SOURCE_REFERENCE_UPDATED" PullRequestEventTypePullRequestMergeStateChanged PullRequestEventType = "PULL_REQUEST_MERGE_STATE_CHANGED" + PullRequestEventTypePullRequestApprovalRuleCreated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_CREATED" + PullRequestEventTypePullRequestApprovalRuleUpdated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_UPDATED" + PullRequestEventTypePullRequestApprovalRuleDeleted PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_DELETED" + PullRequestEventTypePullRequestApprovalRuleOverridden PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN" + PullRequestEventTypePullRequestApprovalStateChanged PullRequestEventType = "PULL_REQUEST_APPROVAL_STATE_CHANGED" ) func (enum PullRequestEventType) MarshalValue() (string, error) { diff --git a/service/codecommit/api_errors.go 
b/service/codecommit/api_errors.go index c9c7bf90279..9320c2e95d1 100644 --- a/service/codecommit/api_errors.go +++ b/service/codecommit/api_errors.go @@ -10,6 +10,75 @@ const ( // The specified Amazon Resource Name (ARN) does not exist in the AWS account. ErrCodeActorDoesNotExistException = "ActorDoesNotExistException" + // ErrCodeApprovalRuleContentRequiredException for service response error code + // "ApprovalRuleContentRequiredException". + // + // The content for the approval rule is empty. You must provide some content + // for an approval rule. The content cannot be null. + ErrCodeApprovalRuleContentRequiredException = "ApprovalRuleContentRequiredException" + + // ErrCodeApprovalRuleDoesNotExistException for service response error code + // "ApprovalRuleDoesNotExistException". + // + // The specified approval rule does not exist. + ErrCodeApprovalRuleDoesNotExistException = "ApprovalRuleDoesNotExistException" + + // ErrCodeApprovalRuleNameAlreadyExistsException for service response error code + // "ApprovalRuleNameAlreadyExistsException". + // + // An approval rule with that name already exists. Approval rule names must + // be unique within the scope of a pull request. + ErrCodeApprovalRuleNameAlreadyExistsException = "ApprovalRuleNameAlreadyExistsException" + + // ErrCodeApprovalRuleNameRequiredException for service response error code + // "ApprovalRuleNameRequiredException". + // + // An approval rule name is required, but was not specified. + ErrCodeApprovalRuleNameRequiredException = "ApprovalRuleNameRequiredException" + + // ErrCodeApprovalRuleTemplateContentRequiredException for service response error code + // "ApprovalRuleTemplateContentRequiredException". + // + // The content for the approval rule template is empty. You must provide some + // content for an approval rule template. The content cannot be null. + ErrCodeApprovalRuleTemplateContentRequiredException = "ApprovalRuleTemplateContentRequiredException" + + // ErrCodeApprovalRuleTemplateDoesNotExistException for service response error code + // "ApprovalRuleTemplateDoesNotExistException". + // + // The specified approval rule template does not exist. Verify that the name + // is correct and that you are signed in to the AWS Region where the template + // was created, and then try again. + ErrCodeApprovalRuleTemplateDoesNotExistException = "ApprovalRuleTemplateDoesNotExistException" + + // ErrCodeApprovalRuleTemplateInUseException for service response error code + // "ApprovalRuleTemplateInUseException". + // + // The approval rule template is associated with one or more repositories. You + // cannot delete a template that is associated with a repository. Remove all + // associations, and then try again. + ErrCodeApprovalRuleTemplateInUseException = "ApprovalRuleTemplateInUseException" + + // ErrCodeApprovalRuleTemplateNameAlreadyExistsException for service response error code + // "ApprovalRuleTemplateNameAlreadyExistsException". + // + // You cannot create an approval rule template with that name because a template + // with that name already exists in this AWS Region for your AWS account. Approval + // rule template names must be unique. + ErrCodeApprovalRuleTemplateNameAlreadyExistsException = "ApprovalRuleTemplateNameAlreadyExistsException" + + // ErrCodeApprovalRuleTemplateNameRequiredException for service response error code + // "ApprovalRuleTemplateNameRequiredException". + // + // An approval rule template name is required, but was not specified. 
+ ErrCodeApprovalRuleTemplateNameRequiredException = "ApprovalRuleTemplateNameRequiredException" + + // ErrCodeApprovalStateRequiredException for service response error code + // "ApprovalStateRequiredException". + // + // An approval state is required, but was not specified. + ErrCodeApprovalStateRequiredException = "ApprovalStateRequiredException" + // ErrCodeAuthorDoesNotExistException for service response error code // "AuthorDoesNotExistException". // @@ -32,7 +101,7 @@ const ( // ErrCodeBlobIdRequiredException for service response error code // "BlobIdRequiredException". // - // A blob ID is required but was not specified. + // A blob ID is required, but was not specified. ErrCodeBlobIdRequiredException = "BlobIdRequiredException" // ErrCodeBranchDoesNotExistException for service response error code @@ -50,17 +119,31 @@ const ( // ErrCodeBranchNameIsTagNameException for service response error code // "BranchNameIsTagNameException". // - // The specified branch name is not valid because it is a tag name. Type the - // name of a current branch in the repository. For a list of valid branch names, - // use ListBranches. + // The specified branch name is not valid because it is a tag name. Enter the + // name of a branch in the repository. For a list of valid branch names, use + // ListBranches. ErrCodeBranchNameIsTagNameException = "BranchNameIsTagNameException" // ErrCodeBranchNameRequiredException for service response error code // "BranchNameRequiredException". // - // A branch name is required but was not specified. + // A branch name is required, but was not specified. ErrCodeBranchNameRequiredException = "BranchNameRequiredException" + // ErrCodeCannotDeleteApprovalRuleFromTemplateException for service response error code + // "CannotDeleteApprovalRuleFromTemplateException". + // + // The approval rule cannot be deleted from the pull request because it was + // created by an approval rule template and applied to the pull request automatically. + ErrCodeCannotDeleteApprovalRuleFromTemplateException = "CannotDeleteApprovalRuleFromTemplateException" + + // ErrCodeCannotModifyApprovalRuleFromTemplateException for service response error code + // "CannotModifyApprovalRuleFromTemplateException". + // + // The approval rule cannot be modified for the pull request because it was + // created by an approval rule template and applied to the pull request automatically. + ErrCodeCannotModifyApprovalRuleFromTemplateException = "CannotModifyApprovalRuleFromTemplateException" + // ErrCodeCommentContentRequiredException for service response error code // "CommentContentRequiredException". // @@ -84,8 +167,8 @@ const ( // ErrCodeCommentDoesNotExistException for service response error code // "CommentDoesNotExistException". // - // No comment exists with the provided ID. Verify that you have provided the - // correct ID, and then try again. + // No comment exists with the provided ID. Verify that you have used the correct + // ID, and then try again. ErrCodeCommentDoesNotExistException = "CommentDoesNotExistException" // ErrCodeCommentIdRequiredException for service response error code @@ -130,6 +213,9 @@ const ( // ErrCodeCommitIdsListRequiredException for service response error code // "CommitIdsListRequiredException". + // + // A list of commit IDs is required, but was either not specified or the list + // was empty. 
ErrCodeCommitIdsListRequiredException = "CommitIdsListRequiredException" // ErrCodeCommitMessageLengthExceededException for service response error code @@ -204,7 +290,7 @@ const ( // // The commit cannot be created because both a source file and file content // have been specified for the same file. You cannot provide both. Either specify - // a source file, or provide the file content directly. + // a source file or provide the file content directly. ErrCodeFileContentAndSourceFileSpecifiedException = "FileContentAndSourceFileSpecifiedException" // ErrCodeFileContentRequiredException for service response error code @@ -217,16 +303,16 @@ const ( // ErrCodeFileContentSizeLimitExceededException for service response error code // "FileContentSizeLimitExceededException". // - // The file cannot be added because it is too large. The maximum file size that - // can be added is 6 MB, and the combined file content change size is 7 MB. - // Consider making these changes using a Git client. + // The file cannot be added because it is too large. The maximum file size is + // 6 MB, and the combined file content change size is 7 MB. Consider making + // these changes using a Git client. ErrCodeFileContentSizeLimitExceededException = "FileContentSizeLimitExceededException" // ErrCodeFileDoesNotExistException for service response error code // "FileDoesNotExistException". // - // The specified file does not exist. Verify that you have provided the correct - // name of the file, including its full path and extension. + // The specified file does not exist. Verify that you have used the correct + // file name, full path, and extension. ErrCodeFileDoesNotExistException = "FileDoesNotExistException" // ErrCodeFileEntryRequiredException for service response error code @@ -239,8 +325,8 @@ const ( // ErrCodeFileModeRequiredException for service response error code // "FileModeRequiredException". // - // The commit cannot be created because a file mode is required to update mode - // permissions for an existing file, but no file mode has been specified. + // The commit cannot be created because no file mode has been specified. A file + // mode is required to update mode permissions for a file. ErrCodeFileModeRequiredException = "FileModeRequiredException" // ErrCodeFileNameConflictsWithDirectoryNameException for service response error code @@ -281,14 +367,14 @@ const ( // "FolderDoesNotExistException". // // The specified folder does not exist. Either the folder name is not correct, - // or you did not provide the full path to the folder. + // or you did not enter the full path to the folder. ErrCodeFolderDoesNotExistException = "FolderDoesNotExistException" // ErrCodeIdempotencyParameterMismatchException for service response error code // "IdempotencyParameterMismatchException". // // The client request token is not valid. Either the token is not in a valid - // format, or the token has been used in a previous request and cannot be re-used. + // format, or the token has been used in a previous request and cannot be reused. ErrCodeIdempotencyParameterMismatchException = "IdempotencyParameterMismatchException" // ErrCodeInvalidActorArnException for service response error code @@ -299,6 +385,47 @@ const ( // and then try again. ErrCodeInvalidActorArnException = "InvalidActorArnException" + // ErrCodeInvalidApprovalRuleContentException for service response error code + // "InvalidApprovalRuleContentException". + // + // The content for the approval rule is not valid. 
+ ErrCodeInvalidApprovalRuleContentException = "InvalidApprovalRuleContentException" + + // ErrCodeInvalidApprovalRuleNameException for service response error code + // "InvalidApprovalRuleNameException". + // + // The name for the approval rule is not valid. + ErrCodeInvalidApprovalRuleNameException = "InvalidApprovalRuleNameException" + + // ErrCodeInvalidApprovalRuleTemplateContentException for service response error code + // "InvalidApprovalRuleTemplateContentException". + // + // The content of the approval rule template is not valid. + ErrCodeInvalidApprovalRuleTemplateContentException = "InvalidApprovalRuleTemplateContentException" + + // ErrCodeInvalidApprovalRuleTemplateDescriptionException for service response error code + // "InvalidApprovalRuleTemplateDescriptionException". + // + // The description for the approval rule template is not valid because it exceeds + // the maximum characters allowed for a description. For more information about + // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + ErrCodeInvalidApprovalRuleTemplateDescriptionException = "InvalidApprovalRuleTemplateDescriptionException" + + // ErrCodeInvalidApprovalRuleTemplateNameException for service response error code + // "InvalidApprovalRuleTemplateNameException". + // + // The name of the approval rule template is not valid. Template names must + // be between 1 and 100 valid characters in length. For more information about + // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). + ErrCodeInvalidApprovalRuleTemplateNameException = "InvalidApprovalRuleTemplateNameException" + + // ErrCodeInvalidApprovalStateException for service response error code + // "InvalidApprovalStateException". + // + // The state for the approval is not valid. Valid values include APPROVE and + // REVOKE. + ErrCodeInvalidApprovalStateException = "InvalidApprovalStateException" + // ErrCodeInvalidAuthorArnException for service response error code // "InvalidAuthorArnException". // @@ -376,8 +503,8 @@ const ( // ErrCodeInvalidDescriptionException for service response error code // "InvalidDescriptionException". // - // The pull request description is not valid. Descriptions are limited to 1,000 - // characters in length. + // The pull request description is not valid. Descriptions cannot be more than + // 1,000 characters. ErrCodeInvalidDescriptionException = "InvalidDescriptionException" // ErrCodeInvalidDestinationCommitSpecifierException for service response error code @@ -398,8 +525,8 @@ const ( // ErrCodeInvalidFileLocationException for service response error code // "InvalidFileLocationException". // - // The location of the file is not valid. Make sure that you include the extension - // of the file as well as the file name. + // The location of the file is not valid. Make sure that you include the file + // name and extension. ErrCodeInvalidFileLocationException = "InvalidFileLocationException" // ErrCodeInvalidFileModeException for service response error code @@ -447,6 +574,12 @@ const ( // The specified sort order is not valid. ErrCodeInvalidOrderException = "InvalidOrderException" + // ErrCodeInvalidOverrideStatusException for service response error code + // "InvalidOverrideStatusException". + // + // The override status is not valid. Valid statuses are OVERRIDE and REVOKE. 
+ ErrCodeInvalidOverrideStatusException = "InvalidOverrideStatusException" + // ErrCodeInvalidParentCommitIdException for service response error code // "InvalidParentCommitIdException". // @@ -493,7 +626,7 @@ const ( // "InvalidReferenceNameException". // // The specified reference name format is not valid. Reference names must conform - // to the Git references format, for example refs/heads/master. For more information, + // to the Git references format (for example, refs/heads/master). For more information, // see Git Internals - Git References (https://git-scm.com/book/en/v2/Git-Internals-Git-References) // or consult your Git documentation. ErrCodeInvalidReferenceNameException = "InvalidReferenceNameException" @@ -528,9 +661,9 @@ const ( // ErrCodeInvalidRepositoryNameException for service response error code // "InvalidRepositoryNameException". // - // At least one specified repository name is not valid. + // A specified repository name is not valid. // - // This exception only occurs when a specified repository name is not valid. + // This exception occurs only when a specified repository name is not valid. // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. ErrCodeInvalidRepositoryNameException = "InvalidRepositoryNameException" @@ -571,8 +704,9 @@ const ( // ErrCodeInvalidRepositoryTriggerRegionException for service response error code // "InvalidRepositoryTriggerRegionException". // - // The region for the trigger target does not match the region for the repository. - // Triggers must be created in the same region as the target for the trigger. + // The AWS Region for the trigger target does not match the AWS Region for the + // repository. Triggers must be created in the same Region as the target for + // the trigger. ErrCodeInvalidRepositoryTriggerRegionException = "InvalidRepositoryTriggerRegionException" // ErrCodeInvalidResourceArnException for service response error code @@ -583,6 +717,18 @@ const ( // in the AWS CodeCommit User Guide. ErrCodeInvalidResourceArnException = "InvalidResourceArnException" + // ErrCodeInvalidRevisionIdException for service response error code + // "InvalidRevisionIdException". + // + // The revision ID is not valid. Use GetPullRequest to determine the value. + ErrCodeInvalidRevisionIdException = "InvalidRevisionIdException" + + // ErrCodeInvalidRuleContentSha256Exception for service response error code + // "InvalidRuleContentSha256Exception". + // + // The SHA-256 hash signature for the rule content is not valid. + ErrCodeInvalidRuleContentSha256Exception = "InvalidRuleContentSha256Exception" + // ErrCodeInvalidSortByException for service response error code // "InvalidSortByException". // @@ -680,10 +826,17 @@ const ( // ErrCodeMaximumItemsToCompareExceededException for service response error code // "MaximumItemsToCompareExceededException". // - // The maximum number of items to compare between the source or destination - // branches and the merge base has exceeded the maximum allowed. + // The number of items to compare between the source or destination branches + // and the merge base has exceeded the maximum allowed. ErrCodeMaximumItemsToCompareExceededException = "MaximumItemsToCompareExceededException" + // ErrCodeMaximumNumberOfApprovalsExceededException for service response error code + // "MaximumNumberOfApprovalsExceededException". + // + // The number of approvals required for the approval rule exceeds the maximum + // number allowed. 
+ ErrCodeMaximumNumberOfApprovalsExceededException = "MaximumNumberOfApprovalsExceededException" + // ErrCodeMaximumOpenPullRequestsExceededException for service response error code // "MaximumOpenPullRequestsExceededException". // @@ -696,7 +849,7 @@ const ( // "MaximumRepositoryNamesExceededException". // // The maximum number of allowed repository names was exceeded. Currently, this - // number is 25. + // number is 100. ErrCodeMaximumRepositoryNamesExceededException = "MaximumRepositoryNamesExceededException" // ErrCodeMaximumRepositoryTriggersExceededException for service response error code @@ -705,6 +858,13 @@ const ( // The number of triggers allowed for the repository was exceeded. ErrCodeMaximumRepositoryTriggersExceededException = "MaximumRepositoryTriggersExceededException" + // ErrCodeMaximumRuleTemplatesAssociatedWithRepositoryException for service response error code + // "MaximumRuleTemplatesAssociatedWithRepositoryException". + // + // The maximum number of approval rule templates for a repository has been exceeded. + // You cannot associate more than 25 approval rule templates with a repository. + ErrCodeMaximumRuleTemplatesAssociatedWithRepositoryException = "MaximumRuleTemplatesAssociatedWithRepositoryException" + // ErrCodeMergeOptionRequiredException for service response error code // "MergeOptionRequiredException". // @@ -740,6 +900,33 @@ const ( // as a result of this commit. A commit must contain at least one change. ErrCodeNoChangeException = "NoChangeException" + // ErrCodeNumberOfRuleTemplatesExceededException for service response error code + // "NumberOfRuleTemplatesExceededException". + // + // The maximum number of approval rule templates has been exceeded for this + // AWS Region. + ErrCodeNumberOfRuleTemplatesExceededException = "NumberOfRuleTemplatesExceededException" + + // ErrCodeNumberOfRulesExceededException for service response error code + // "NumberOfRulesExceededException". + // + // The approval rule cannot be added. The pull request has the maximum number + // of approval rules associated with it. + ErrCodeNumberOfRulesExceededException = "NumberOfRulesExceededException" + + // ErrCodeOverrideAlreadySetException for service response error code + // "OverrideAlreadySetException". + // + // The pull request has already had its approval rules set to override. + ErrCodeOverrideAlreadySetException = "OverrideAlreadySetException" + + // ErrCodeOverrideStatusRequiredException for service response error code + // "OverrideStatusRequiredException". + // + // An override status is required, but no value was provided. Valid values include + // OVERRIDE and REVOKE. + ErrCodeOverrideStatusRequiredException = "OverrideStatusRequiredException" + // ErrCodeParentCommitDoesNotExistException for service response error code // "ParentCommitDoesNotExistException". // @@ -781,6 +968,21 @@ const ( // The pull request status cannot be updated because it is already closed. ErrCodePullRequestAlreadyClosedException = "PullRequestAlreadyClosedException" + // ErrCodePullRequestApprovalRulesNotSatisfiedException for service response error code + // "PullRequestApprovalRulesNotSatisfiedException". + // + // The pull request cannot be merged because one or more approval rules applied + // to the pull request have conditions that have not been met. 
+ ErrCodePullRequestApprovalRulesNotSatisfiedException = "PullRequestApprovalRulesNotSatisfiedException" + + // ErrCodePullRequestCannotBeApprovedByAuthorException for service response error code + // "PullRequestCannotBeApprovedByAuthorException". + // + // The approval cannot be applied because the user approving the pull request + // matches the user who created the pull request. You cannot approve a pull + // request that you created. + ErrCodePullRequestCannotBeApprovedByAuthorException = "PullRequestCannotBeApprovedByAuthorException" + // ErrCodePullRequestDoesNotExistException for service response error code // "PullRequestDoesNotExistException". // @@ -828,7 +1030,7 @@ const ( // ErrCodeReplacementContentRequiredException for service response error code // "ReplacementContentRequiredException". // - // USE_NEW_CONTENT was specified but no replacement content has been provided. + // USE_NEW_CONTENT was specified, but no replacement content has been provided. ErrCodeReplacementContentRequiredException = "ReplacementContentRequiredException" // ErrCodeReplacementTypeRequiredException for service response error code @@ -858,13 +1060,13 @@ const ( // ErrCodeRepositoryNameRequiredException for service response error code // "RepositoryNameRequiredException". // - // A repository name is required but was not specified. + // A repository name is required, but was not specified. ErrCodeRepositoryNameRequiredException = "RepositoryNameRequiredException" // ErrCodeRepositoryNamesRequiredException for service response error code // "RepositoryNamesRequiredException". // - // A repository names object is required but was not specified. + // At least one repository name object is required, but was not specified. ErrCodeRepositoryNamesRequiredException = "RepositoryNamesRequiredException" // ErrCodeRepositoryNotAssociatedWithPullRequestException for service response error code @@ -878,43 +1080,43 @@ const ( // ErrCodeRepositoryTriggerBranchNameListRequiredException for service response error code // "RepositoryTriggerBranchNameListRequiredException". // - // At least one branch name is required but was not specified in the trigger + // At least one branch name is required, but was not specified in the trigger // configuration. ErrCodeRepositoryTriggerBranchNameListRequiredException = "RepositoryTriggerBranchNameListRequiredException" // ErrCodeRepositoryTriggerDestinationArnRequiredException for service response error code // "RepositoryTriggerDestinationArnRequiredException". // - // A destination ARN for the target service for the trigger is required but + // A destination ARN for the target service for the trigger is required, but // was not specified. ErrCodeRepositoryTriggerDestinationArnRequiredException = "RepositoryTriggerDestinationArnRequiredException" // ErrCodeRepositoryTriggerEventsListRequiredException for service response error code // "RepositoryTriggerEventsListRequiredException". // - // At least one event for the trigger is required but was not specified. + // At least one event for the trigger is required, but was not specified. ErrCodeRepositoryTriggerEventsListRequiredException = "RepositoryTriggerEventsListRequiredException" // ErrCodeRepositoryTriggerNameRequiredException for service response error code // "RepositoryTriggerNameRequiredException". // - // A name for the trigger is required but was not specified. + // A name for the trigger is required, but was not specified. 
ErrCodeRepositoryTriggerNameRequiredException = "RepositoryTriggerNameRequiredException" // ErrCodeRepositoryTriggersListRequiredException for service response error code // "RepositoryTriggersListRequiredException". // - // The list of triggers for the repository is required but was not specified. + // The list of triggers for the repository is required, but was not specified. ErrCodeRepositoryTriggersListRequiredException = "RepositoryTriggersListRequiredException" // ErrCodeRequestTokenRequiredException for service response error code // "RequestTokenRequiredException". // // A client request token is required. A client request token is an unique, - // client-generated idempotency token that when provided in a request, ensures + // client-generated idempotency token that, when provided in a request, ensures // the request cannot be repeated with a changed parameter. If a request is - // received with the same parameters and a token is included, the request will - // return information about the initial request that used that token. + // received with the same parameters and a token is included, the request returns + // information about the initial request that used that token. ErrCodeRequestTokenRequiredException = "RequestTokenRequiredException" // ErrCodeResourceArnRequiredException for service response error code @@ -933,6 +1135,19 @@ const ( // or moving a .gitkeep file. ErrCodeRestrictedSourceFileException = "RestrictedSourceFileException" + // ErrCodeRevisionIdRequiredException for service response error code + // "RevisionIdRequiredException". + // + // A revision ID is required, but was not provided. + ErrCodeRevisionIdRequiredException = "RevisionIdRequiredException" + + // ErrCodeRevisionNotCurrentException for service response error code + // "RevisionNotCurrentException". + // + // The revision ID provided in the request does not match the current revision + // ID. Use GetPullRequest to retrieve the current revision ID. + ErrCodeRevisionNotCurrentException = "RevisionNotCurrentException" + // ErrCodeSameFileContentException for service response error code // "SameFileContentException". // @@ -953,8 +1168,8 @@ const ( // ErrCodeSourceAndDestinationAreSameException for service response error code // "SourceAndDestinationAreSameException". // - // The source branch and the destination branch for the pull request are the - // same. You must specify different branches for the source and destination. + // The source branch and destination branch for the pull request are the same. + // You must specify different branches for the source and destination. ErrCodeSourceAndDestinationAreSameException = "SourceAndDestinationAreSameException" // ErrCodeSourceFileOrContentRequiredException for service response error code diff --git a/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go b/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go new file mode 100644 index 00000000000..ba138cd6bfb --- /dev/null +++ b/service/codecommit/api_op_AssociateApprovalRuleTemplateWithRepository.go @@ -0,0 +1,140 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type AssociateApprovalRuleTemplateWithRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name for the approval rule template. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The name of the repository that you want to associate with the template. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateApprovalRuleTemplateWithRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateApprovalRuleTemplateWithRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AssociateApprovalRuleTemplateWithRepositoryInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AssociateApprovalRuleTemplateWithRepositoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateApprovalRuleTemplateWithRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opAssociateApprovalRuleTemplateWithRepository = "AssociateApprovalRuleTemplateWithRepository" + +// AssociateApprovalRuleTemplateWithRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an association between an approval rule template and a specified +// repository. Then, the next time a pull request is created in the repository +// where the destination reference (if specified) matches the destination reference +// (branch) for the pull request, an approval rule that matches the template +// conditions is automatically created for that pull request. If no destination +// references are specified in the template, an approval rule that matches the +// template contents is created for all pull requests in that repository. +// +// // Example sending a request using AssociateApprovalRuleTemplateWithRepositoryRequest. 
+// req := client.AssociateApprovalRuleTemplateWithRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/AssociateApprovalRuleTemplateWithRepository +func (c *Client) AssociateApprovalRuleTemplateWithRepositoryRequest(input *AssociateApprovalRuleTemplateWithRepositoryInput) AssociateApprovalRuleTemplateWithRepositoryRequest { + op := &aws.Operation{ + Name: opAssociateApprovalRuleTemplateWithRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateApprovalRuleTemplateWithRepositoryInput{} + } + + req := c.newRequest(op, input, &AssociateApprovalRuleTemplateWithRepositoryOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AssociateApprovalRuleTemplateWithRepositoryRequest{Request: req, Input: input, Copy: c.AssociateApprovalRuleTemplateWithRepositoryRequest} +} + +// AssociateApprovalRuleTemplateWithRepositoryRequest is the request type for the +// AssociateApprovalRuleTemplateWithRepository API operation. +type AssociateApprovalRuleTemplateWithRepositoryRequest struct { + *aws.Request + Input *AssociateApprovalRuleTemplateWithRepositoryInput + Copy func(*AssociateApprovalRuleTemplateWithRepositoryInput) AssociateApprovalRuleTemplateWithRepositoryRequest +} + +// Send marshals and sends the AssociateApprovalRuleTemplateWithRepository API request. +func (r AssociateApprovalRuleTemplateWithRepositoryRequest) Send(ctx context.Context) (*AssociateApprovalRuleTemplateWithRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AssociateApprovalRuleTemplateWithRepositoryResponse{ + AssociateApprovalRuleTemplateWithRepositoryOutput: r.Request.Data.(*AssociateApprovalRuleTemplateWithRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AssociateApprovalRuleTemplateWithRepositoryResponse is the response type for the +// AssociateApprovalRuleTemplateWithRepository API operation. +type AssociateApprovalRuleTemplateWithRepositoryResponse struct { + *AssociateApprovalRuleTemplateWithRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AssociateApprovalRuleTemplateWithRepository request. +func (r *AssociateApprovalRuleTemplateWithRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go b/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go new file mode 100644 index 00000000000..03500daab96 --- /dev/null +++ b/service/codecommit/api_op_BatchAssociateApprovalRuleTemplateWithRepositories.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type BatchAssociateApprovalRuleTemplateWithRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The name of the template you want to associate with one or more repositories. 
+ // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The names of the repositories you want to associate with the template. + // + // The length constraint limit is for each string in the array. The array itself + // can be empty. + // + // RepositoryNames is a required field + RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchAssociateApprovalRuleTemplateWithRepositoriesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchAssociateApprovalRuleTemplateWithRepositoriesInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryNames == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchAssociateApprovalRuleTemplateWithRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of names of the repositories that have been associated with the template. + // + // AssociatedRepositoryNames is a required field + AssociatedRepositoryNames []string `locationName:"associatedRepositoryNames" type:"list" required:"true"` + + // A list of any errors that might have occurred while attempting to create + // the association between the template and the repositories. + // + // Errors is a required field + Errors []BatchAssociateApprovalRuleTemplateWithRepositoriesError `locationName:"errors" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +const opBatchAssociateApprovalRuleTemplateWithRepositories = "BatchAssociateApprovalRuleTemplateWithRepositories" + +// BatchAssociateApprovalRuleTemplateWithRepositoriesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an association between an approval rule template and one or more +// specified repositories. +// +// // Example sending a request using BatchAssociateApprovalRuleTemplateWithRepositoriesRequest. 
+// req := client.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchAssociateApprovalRuleTemplateWithRepositories +func (c *Client) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(input *BatchAssociateApprovalRuleTemplateWithRepositoriesInput) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest { + op := &aws.Operation{ + Name: opBatchAssociateApprovalRuleTemplateWithRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchAssociateApprovalRuleTemplateWithRepositoriesInput{} + } + + req := c.newRequest(op, input, &BatchAssociateApprovalRuleTemplateWithRepositoriesOutput{}) + return BatchAssociateApprovalRuleTemplateWithRepositoriesRequest{Request: req, Input: input, Copy: c.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest} +} + +// BatchAssociateApprovalRuleTemplateWithRepositoriesRequest is the request type for the +// BatchAssociateApprovalRuleTemplateWithRepositories API operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesRequest struct { + *aws.Request + Input *BatchAssociateApprovalRuleTemplateWithRepositoriesInput + Copy func(*BatchAssociateApprovalRuleTemplateWithRepositoriesInput) BatchAssociateApprovalRuleTemplateWithRepositoriesRequest +} + +// Send marshals and sends the BatchAssociateApprovalRuleTemplateWithRepositories API request. +func (r BatchAssociateApprovalRuleTemplateWithRepositoriesRequest) Send(ctx context.Context) (*BatchAssociateApprovalRuleTemplateWithRepositoriesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchAssociateApprovalRuleTemplateWithRepositoriesResponse{ + BatchAssociateApprovalRuleTemplateWithRepositoriesOutput: r.Request.Data.(*BatchAssociateApprovalRuleTemplateWithRepositoriesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchAssociateApprovalRuleTemplateWithRepositoriesResponse is the response type for the +// BatchAssociateApprovalRuleTemplateWithRepositories API operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesResponse struct { + *BatchAssociateApprovalRuleTemplateWithRepositoriesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchAssociateApprovalRuleTemplateWithRepositories request. +func (r *BatchAssociateApprovalRuleTemplateWithRepositoriesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchDescribeMergeConflicts.go b/service/codecommit/api_op_BatchDescribeMergeConflicts.go index 38f5c5b1564..54db1a5fed2 100644 --- a/service/codecommit/api_op_BatchDescribeMergeConflicts.go +++ b/service/codecommit/api_op_BatchDescribeMergeConflicts.go @@ -13,20 +13,19 @@ type BatchDescribeMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. 
If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -46,7 +45,7 @@ type BatchDescribeMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -57,7 +56,7 @@ type BatchDescribeMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` diff --git a/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go b/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go new file mode 100644 index 00000000000..eb4d5a10b24 --- /dev/null +++ b/service/codecommit/api_op_BatchDisassociateApprovalRuleTemplateFromRepositories.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type BatchDisassociateApprovalRuleTemplateFromRepositoriesInput struct { + _ struct{} `type:"structure"` + + // The name of the template that you want to disassociate from one or more repositories. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The repository names that you want to disassociate from the approval rule + // template. + // + // The length constraint limit is for each string in the array. The array itself + // can be empty. + // + // RepositoryNames is a required field + RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchDisassociateApprovalRuleTemplateFromRepositoriesInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryNames == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput struct { + _ struct{} `type:"structure"` + + // A list of repository names that have had their association with the template + // removed. + // + // DisassociatedRepositoryNames is a required field + DisassociatedRepositoryNames []string `locationName:"disassociatedRepositoryNames" type:"list" required:"true"` + + // A list of any errors that might have occurred while attempting to remove + // the association between the template and the repositories. + // + // Errors is a required field + Errors []BatchDisassociateApprovalRuleTemplateFromRepositoriesError `locationName:"errors" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput) String() string { + return awsutil.Prettify(s) +} + +const opBatchDisassociateApprovalRuleTemplateFromRepositories = "BatchDisassociateApprovalRuleTemplateFromRepositories" + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Removes the association between an approval rule template and one or more +// specified repositories. +// +// // Example sending a request using BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest. +// req := client.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchDisassociateApprovalRuleTemplateFromRepositories +func (c *Client) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(input *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest { + op := &aws.Operation{ + Name: opBatchDisassociateApprovalRuleTemplateFromRepositories, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDisassociateApprovalRuleTemplateFromRepositoriesInput{} + } + + req := c.newRequest(op, input, &BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput{}) + return BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest{Request: req, Input: input, Copy: c.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest} +} + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest is the request type for the +// BatchDisassociateApprovalRuleTemplateFromRepositories API operation. 
+type BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest struct { + *aws.Request + Input *BatchDisassociateApprovalRuleTemplateFromRepositoriesInput + Copy func(*BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest +} + +// Send marshals and sends the BatchDisassociateApprovalRuleTemplateFromRepositories API request. +func (r BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest) Send(ctx context.Context) (*BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse{ + BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput: r.Request.Data.(*BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse is the response type for the +// BatchDisassociateApprovalRuleTemplateFromRepositories API operation. +type BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse struct { + *BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// BatchDisassociateApprovalRuleTemplateFromRepositories request. +func (r *BatchDisassociateApprovalRuleTemplateFromRepositoriesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_BatchGetCommits.go b/service/codecommit/api_op_BatchGetCommits.go index 3180c7ff3cd..30ca2f29ffc 100644 --- a/service/codecommit/api_op_BatchGetCommits.go +++ b/service/codecommit/api_op_BatchGetCommits.go @@ -14,7 +14,8 @@ type BatchGetCommitsInput struct { // The full commit IDs of the commits to get information about. // - // You must supply the full SHAs of each commit. You cannot use shortened SHAs. + // You must supply the full SHA IDs of each commit. You cannot use shortened + // SHA IDs. // // CommitIds is a required field CommitIds []string `locationName:"commitIds" type:"list" required:"true"` @@ -59,9 +60,8 @@ type BatchGetCommitsOutput struct { Commits []Commit `locationName:"commits" type:"list"` // Returns any commit IDs for which information could not be found. For example, - // if one of the commit IDs was a shortened SHA or that commit was not found - // in the specified repository, the ID will return an error object with additional - // information. + // if one of the commit IDs was a shortened SHA ID or that commit was not found + // in the specified repository, the ID returns an error object with more information. Errors []BatchGetCommitsError `locationName:"errors" type:"list"` } diff --git a/service/codecommit/api_op_BatchGetRepositories.go b/service/codecommit/api_op_BatchGetRepositories.go index eb5f9b2251c..660270e0cde 100644 --- a/service/codecommit/api_op_BatchGetRepositories.go +++ b/service/codecommit/api_op_BatchGetRepositories.go @@ -15,6 +15,9 @@ type BatchGetRepositoriesInput struct { // The names of the repositories to get information about. // + // The length constraint limit is for each string in the array. The array itself + // can be empty. 
+ // // RepositoryNames is a required field RepositoryNames []string `locationName:"repositoryNames" type:"list" required:"true"` } @@ -63,9 +66,9 @@ const opBatchGetRepositories = "BatchGetRepositories" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. // // // Example sending a request using BatchGetRepositoriesRequest. // req := client.BatchGetRepositoriesRequest(params) diff --git a/service/codecommit/api_op_CreateApprovalRuleTemplate.go b/service/codecommit/api_op_CreateApprovalRuleTemplate.go new file mode 100644 index 00000000000..000d444fc67 --- /dev/null +++ b/service/codecommit/api_op_CreateApprovalRuleTemplate.go @@ -0,0 +1,171 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreateApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The content of the approval rule that is created on pull requests in associated + // repositories. If you specify one or more destination references (branches), + // approval rules are created in an associated repository only if their destination + // references (branches) match those specified in the template. + // + // When you create the content of the approval rule template, you can specify + // approvers in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following are + // counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). + // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // ApprovalRuleTemplateContent is a required field + ApprovalRuleTemplateContent *string `locationName:"approvalRuleTemplateContent" min:"1" type:"string" required:"true"` + + // The description of the approval rule template. 
Consider providing a description + // that explains what this template does and when it might be appropriate to + // associate it with repositories. + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string"` + + // The name of the approval rule template. Provide descriptive names, because + // this name is applied to the approval rules created automatically in associated + // repositories. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateContent == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateContent")) + } + if s.ApprovalRuleTemplateContent != nil && len(*s.ApprovalRuleTemplateContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateContent", 1)) + } + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content and structure of the created approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateApprovalRuleTemplate = "CreateApprovalRuleTemplate" + +// CreateApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates a template for approval rules that can then be associated with one +// or more repositories in your AWS account. When you associate a template with +// a repository, AWS CodeCommit creates an approval rule that matches the conditions +// of the template for all pull requests that meet the conditions of the template. +// For more information, see AssociateApprovalRuleTemplateWithRepository. +// +// // Example sending a request using CreateApprovalRuleTemplateRequest. 
+// req := client.CreateApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreateApprovalRuleTemplate +func (c *Client) CreateApprovalRuleTemplateRequest(input *CreateApprovalRuleTemplateInput) CreateApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opCreateApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &CreateApprovalRuleTemplateOutput{}) + return CreateApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.CreateApprovalRuleTemplateRequest} +} + +// CreateApprovalRuleTemplateRequest is the request type for the +// CreateApprovalRuleTemplate API operation. +type CreateApprovalRuleTemplateRequest struct { + *aws.Request + Input *CreateApprovalRuleTemplateInput + Copy func(*CreateApprovalRuleTemplateInput) CreateApprovalRuleTemplateRequest +} + +// Send marshals and sends the CreateApprovalRuleTemplate API request. +func (r CreateApprovalRuleTemplateRequest) Send(ctx context.Context) (*CreateApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateApprovalRuleTemplateResponse{ + CreateApprovalRuleTemplateOutput: r.Request.Data.(*CreateApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateApprovalRuleTemplateResponse is the response type for the +// CreateApprovalRuleTemplate API operation. +type CreateApprovalRuleTemplateResponse struct { + *CreateApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateApprovalRuleTemplate request. +func (r *CreateApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_CreateBranch.go b/service/codecommit/api_op_CreateBranch.go index 4ef9c66a760..f147bbb9a46 100644 --- a/service/codecommit/api_op_CreateBranch.go +++ b/service/codecommit/api_op_CreateBranch.go @@ -78,7 +78,7 @@ const opCreateBranch = "CreateBranch" // CreateBranchRequest returns a request value for making API operation for // AWS CodeCommit. // -// Creates a new branch in a repository and points the branch to a commit. +// Creates a branch in a repository and points the branch to a commit. // // Calling the create branch operation does not set a repository's default branch. // To do this, call the update default branch operation. diff --git a/service/codecommit/api_op_CreateCommit.go b/service/codecommit/api_op_CreateCommit.go index b8ea6f004d5..26f90be33bb 100644 --- a/service/codecommit/api_op_CreateCommit.go +++ b/service/codecommit/api_op_CreateCommit.go @@ -13,40 +13,38 @@ import ( type CreateCommitInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` - // The name of the branch where you will create the commit. + // The name of the branch where you create the commit. 
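// A sketch of creating an approval rule template with the CreateApprovalRuleTemplate
// operation defined above, using the request/response pattern its documentation shows.
// The config loading through aws/external, the template name, and the rule-content JSON
// are illustrative assumptions rather than values taken from this file; see the AWS
// CodeCommit User Guide for the authoritative approval rule content schema.
//
//    import (
//        "context"
//        "fmt"
//
//        "github.com/aws/aws-sdk-go-v2/aws"
//        "github.com/aws/aws-sdk-go-v2/aws/external"
//        "github.com/aws/aws-sdk-go-v2/service/codecommit"
//    )
//
//    func createApprovalRuleTemplate(ctx context.Context) error {
//        cfg, err := external.LoadDefaultAWSConfig()
//        if err != nil {
//            return err
//        }
//        svc := codecommit.New(cfg)
//
//        req := svc.CreateApprovalRuleTemplateRequest(&codecommit.CreateApprovalRuleTemplateInput{
//            ApprovalRuleTemplateName:        aws.String("require-two-approvals"),
//            ApprovalRuleTemplateDescription: aws.String("Requires two approvals on every pull request"),
//            ApprovalRuleTemplateContent:     aws.String(`{"Version": "2018-11-08", "Statements": [{"Type": "Approvers", "NumberOfApprovalsNeeded": 2}]}`),
//        })
//        resp, err := req.Send(ctx)
//        if err != nil {
//            return err
//        }
//        fmt.Println(resp)
//        return nil
//    }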
// // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // The commit message you want to include as part of creating the commit. Commit - // messages are limited to 256 KB. If no message is specified, a default message - // will be used. + // The commit message you want to include in the commit. Commit messages are + // limited to 256 KB. If no message is specified, a default message is used. CommitMessage *string `locationName:"commitMessage" type:"string"` - // The files to delete in this commit. These files will still exist in prior - // commits. + // The files to delete in this commit. These files still exist in earlier commits. DeleteFiles []DeleteFileEntry `locationName:"deleteFiles" type:"list"` // The email address of the person who created the commit. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` - // The ID of the commit that is the parent of the commit you will create. If - // this is an empty repository, this is not required. + // The ID of the commit that is the parent of the commit you create. Not required + // if this is an empty repository. ParentCommitId *string `locationName:"parentCommitId" type:"string"` // The files to add or update in this commit. PutFiles []PutFileEntry `locationName:"putFiles" type:"list"` - // The name of the repository where you will create the commit. + // The name of the repository where you create the commit. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreatePullRequest.go b/service/codecommit/api_op_CreatePullRequest.go index ee9a52d7e43..eb1345f6c4b 100644 --- a/service/codecommit/api_op_CreatePullRequest.go +++ b/service/codecommit/api_op_CreatePullRequest.go @@ -13,28 +13,28 @@ type CreatePullRequestInput struct { _ struct{} `type:"structure"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. // - // The AWS SDKs prepopulate client request tokens. If using an AWS SDK, you - // do not have to generate an idempotency token, as this will be done for you. + // The AWS SDKs prepopulate client request tokens. If you are using an AWS SDK, + // an idempotency token is created for you. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // A description of the pull request.
Description *string `locationName:"description" type:"string"` // The targets for the pull request, including the source of the code to be - // reviewed (the source branch), and the destination where the creator of the + // reviewed (the source branch) and the destination where the creator of the // pull request intends the code to be merged after the pull request is closed // (the destination branch). // // Targets is a required field Targets []Target `locationName:"targets" type:"list" required:"true"` - // The title of the pull request. This title will be used to identify the pull - // request to other users in the repository. + // The title of the pull request. This title is used to identify the pull request + // to other users in the repository. // // Title is a required field Title *string `locationName:"title" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreatePullRequestApprovalRule.go b/service/codecommit/api_op_CreatePullRequestApprovalRule.go new file mode 100644 index 00000000000..470efa2df16 --- /dev/null +++ b/service/codecommit/api_op_CreatePullRequestApprovalRule.go @@ -0,0 +1,169 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreatePullRequestApprovalRuleInput struct { + _ struct{} `type:"structure"` + + // The content of the approval rule, including the number of approvals needed + // and the structure of an approval pool defined for approvals, if any. For + // more information about approval pools, see the AWS CodeCommit User Guide. + // + // When you create the content of the approval rule, you can specify approvers + // in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following would + // be counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). + // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // ApprovalRuleContent is a required field + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string" required:"true"` + + // The name for the approval rule. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request for which you want to create + // the approval rule. 
+ // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestApprovalRuleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePullRequestApprovalRuleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreatePullRequestApprovalRuleInput"} + + if s.ApprovalRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleContent")) + } + if s.ApprovalRuleContent != nil && len(*s.ApprovalRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleContent", 1)) + } + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreatePullRequestApprovalRuleOutput struct { + _ struct{} `type:"structure"` + + // Information about the created approval rule. + // + // ApprovalRule is a required field + ApprovalRule *ApprovalRule `locationName:"approvalRule" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreatePullRequestApprovalRuleOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreatePullRequestApprovalRule = "CreatePullRequestApprovalRule" + +// CreatePullRequestApprovalRuleRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Creates an approval rule for a pull request. +// +// // Example sending a request using CreatePullRequestApprovalRuleRequest. +// req := client.CreatePullRequestApprovalRuleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/CreatePullRequestApprovalRule +func (c *Client) CreatePullRequestApprovalRuleRequest(input *CreatePullRequestApprovalRuleInput) CreatePullRequestApprovalRuleRequest { + op := &aws.Operation{ + Name: opCreatePullRequestApprovalRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePullRequestApprovalRuleInput{} + } + + req := c.newRequest(op, input, &CreatePullRequestApprovalRuleOutput{}) + return CreatePullRequestApprovalRuleRequest{Request: req, Input: input, Copy: c.CreatePullRequestApprovalRuleRequest} +} + +// CreatePullRequestApprovalRuleRequest is the request type for the +// CreatePullRequestApprovalRule API operation. +type CreatePullRequestApprovalRuleRequest struct { + *aws.Request + Input *CreatePullRequestApprovalRuleInput + Copy func(*CreatePullRequestApprovalRuleInput) CreatePullRequestApprovalRuleRequest +} + +// Send marshals and sends the CreatePullRequestApprovalRule API request. 
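// A sketch of attaching an approval rule to an open pull request with the
// CreatePullRequestApprovalRule operation defined above, assuming svc is a
// *codecommit.Client and the aws, codecommit, context, and fmt imports are in scope.
// The pull request ID, rule name, and rule-content JSON are placeholders.
//
//    func addApprovalRule(ctx context.Context, svc *codecommit.Client, pullRequestID string) error {
//        req := svc.CreatePullRequestApprovalRuleRequest(&codecommit.CreatePullRequestApprovalRuleInput{
//            PullRequestId:       aws.String(pullRequestID),
//            ApprovalRuleName:    aws.String("require-team-lead"),
//            ApprovalRuleContent: aws.String(`{"Version": "2018-11-08", "Statements": [{"Type": "Approvers", "NumberOfApprovalsNeeded": 1}]}`),
//        })
//        resp, err := req.Send(ctx)
//        if err != nil {
//            return err
//        }
//        fmt.Println(resp.ApprovalRule)
//        return nil
//    }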
+func (r CreatePullRequestApprovalRuleRequest) Send(ctx context.Context) (*CreatePullRequestApprovalRuleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreatePullRequestApprovalRuleResponse{ + CreatePullRequestApprovalRuleOutput: r.Request.Data.(*CreatePullRequestApprovalRuleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreatePullRequestApprovalRuleResponse is the response type for the +// CreatePullRequestApprovalRule API operation. +type CreatePullRequestApprovalRuleResponse struct { + *CreatePullRequestApprovalRuleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreatePullRequestApprovalRule request. +func (r *CreatePullRequestApprovalRuleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_CreateRepository.go b/service/codecommit/api_op_CreateRepository.go index f41b64c0d1d..4cfa5743e79 100644 --- a/service/codecommit/api_op_CreateRepository.go +++ b/service/codecommit/api_op_CreateRepository.go @@ -17,18 +17,18 @@ type CreateRepositoryInput struct { // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description - // and display it in a web page could expose users to potentially malicious - // code. Make sure that you HTML-encode the description field in any application - // that uses this API to display the repository description on a web page. + // and display it in a webpage can expose users to potentially malicious code. + // Make sure that you HTML-encode the description field in any application that + // uses this API to display the repository description on a webpage. RepositoryDescription *string `locationName:"repositoryDescription" type:"string"` // The name of the new repository to be created. // - // The repository name must be unique across the calling AWS account. In addition, - // repository names are limited to 100 alphanumeric, dash, and underscore characters, - // and cannot include certain characters. For a full description of the limits + // The repository name must be unique across the calling AWS account. Repository + // names are limited to 100 alphanumeric, dash, and underscore characters, and + // cannot include certain characters. For more information about the limits // on repository names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) - // in the AWS CodeCommit User Guide. The suffix ".git" is prohibited. + // in the AWS CodeCommit User Guide. The suffix .git is prohibited. // // RepositoryName is a required field RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` diff --git a/service/codecommit/api_op_CreateUnreferencedMergeCommit.go b/service/codecommit/api_op_CreateUnreferencedMergeCommit.go index 585a3fe3a86..1965ec7690a 100644 --- a/service/codecommit/api_op_CreateUnreferencedMergeCommit.go +++ b/service/codecommit/api_op_CreateUnreferencedMergeCommit.go @@ -13,31 +13,30 @@ type CreateUnreferencedMergeCommitInput struct { _ struct{} `type:"structure"` // The name of the author who created the unreferenced commit. This information - // will be used as both the author and committer for the commit. + // is used as both the author and committer for the commit. 
AuthorName *string `locationName:"authorName" type:"string"` // The commit message for the unreferenced commit. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -47,7 +46,7 @@ type CreateUnreferencedMergeCommitInput struct { // If the commit contains deletions, whether to keep a folder or folder structure // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // file is created for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The merge option or strategy you want to use to merge the code. @@ -62,7 +61,7 @@ type CreateUnreferencedMergeCommitInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -130,7 +129,7 @@ const opCreateUnreferencedMergeCommit = "CreateUnreferencedMergeCommit" // Creates an unreferenced commit that represents the result of merging two // branches using a specified merge strategy. This can help you determine the // outcome of a potential merge. This API cannot be used with the fast-forward -// merge strategy, as that strategy does not create a merge commit. +// merge strategy because that strategy does not create a merge commit. 
// // This unreferenced merge commit can only be accessed using the GetCommit API // or through git commands such as git fetch. To retrieve this commit, you must diff --git a/service/codecommit/api_op_DeleteApprovalRuleTemplate.go b/service/codecommit/api_op_DeleteApprovalRuleTemplate.go new file mode 100644 index 00000000000..2b8de16da01 --- /dev/null +++ b/service/codecommit/api_op_DeleteApprovalRuleTemplate.go @@ -0,0 +1,125 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeleteApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template to delete. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The system-generated ID of the deleted approval rule template. If the template + // has been previously deleted, the only response is a 200 OK. + // + // ApprovalRuleTemplateId is a required field + ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteApprovalRuleTemplate = "DeleteApprovalRuleTemplate" + +// DeleteApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Deletes a specified approval rule template. Deleting a template does not +// remove approval rules on pull requests already created with the template. +// +// // Example sending a request using DeleteApprovalRuleTemplateRequest. +// req := client.DeleteApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeleteApprovalRuleTemplate +func (c *Client) DeleteApprovalRuleTemplateRequest(input *DeleteApprovalRuleTemplateInput) DeleteApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opDeleteApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &DeleteApprovalRuleTemplateOutput{}) + return DeleteApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.DeleteApprovalRuleTemplateRequest} +} + +// DeleteApprovalRuleTemplateRequest is the request type for the +// DeleteApprovalRuleTemplate API operation. 
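// A sketch of deleting an approval rule template by name with the DeleteApprovalRuleTemplate
// operation in this file, assuming svc is a *codecommit.Client; the template name is a
// placeholder.
//
//    func deleteApprovalRuleTemplate(ctx context.Context, svc *codecommit.Client) error {
//        req := svc.DeleteApprovalRuleTemplateRequest(&codecommit.DeleteApprovalRuleTemplateInput{
//            ApprovalRuleTemplateName: aws.String("require-two-approvals"),
//        })
//        resp, err := req.Send(ctx)
//        if err != nil {
//            return err
//        }
//        // As documented above, deleting a template that was already deleted still returns 200 OK.
//        fmt.Println(resp)
//        return nil
//    }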
+type DeleteApprovalRuleTemplateRequest struct { + *aws.Request + Input *DeleteApprovalRuleTemplateInput + Copy func(*DeleteApprovalRuleTemplateInput) DeleteApprovalRuleTemplateRequest +} + +// Send marshals and sends the DeleteApprovalRuleTemplate API request. +func (r DeleteApprovalRuleTemplateRequest) Send(ctx context.Context) (*DeleteApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteApprovalRuleTemplateResponse{ + DeleteApprovalRuleTemplateOutput: r.Request.Data.(*DeleteApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteApprovalRuleTemplateResponse is the response type for the +// DeleteApprovalRuleTemplate API operation. +type DeleteApprovalRuleTemplateResponse struct { + *DeleteApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteApprovalRuleTemplate request. +func (r *DeleteApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_DeleteFile.go b/service/codecommit/api_op_DeleteFile.go index 03ad3c44055..1b73170435e 100644 --- a/service/codecommit/api_op_DeleteFile.go +++ b/service/codecommit/api_op_DeleteFile.go @@ -12,43 +12,42 @@ import ( type DeleteFileInput struct { _ struct{} `type:"structure"` - // The name of the branch where the commit will be made deleting the file. + // The name of the branch where the commit that deletes the file is made. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` // The commit message you want to include as part of deleting the file. Commit // messages are limited to 256 KB. If no message is specified, a default message - // will be used. + // is used. CommitMessage *string `locationName:"commitMessage" type:"string"` // The email address for the commit that deletes the file. If no email address - // is specified, the email address will be left blank. + // is specified, the email address is left blank. Email *string `locationName:"email" type:"string"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. For example, /examples/file.md is a - // fully qualified path to a file named file.md in a folder named examples. + // The fully qualified path to the file to be deleted, including the full + // name and extension of that file. For example, /examples/file.md is a fully + // qualified path to a file named file.md in a folder named examples. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // Specifies whether to delete the folder or directory that contains the file - // you want to delete if that file is the only object in the folder or directory. - // By default, empty folders will be deleted. This includes empty folders that - // are part of the directory structure. + // If a file is the only object in the folder or directory, specifies whether + // to delete the folder or directory that contains the file. By default, empty + // folders are deleted. This includes empty folders that are part of the directory + // structure.
For example, if the path to a file is dir1/dir2/dir3/dir4, and + // dir2 and dir3 are empty, deleting the last file in dir4 also deletes the + // empty folders dir4, dir3, and dir2. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the author of the commit that deletes the file. If no name is - // specified, the user's ARN will be used as the author name and committer name. + // specified, the user's ARN is used as the author name and committer name. Name *string `locationName:"name" type:"string"` // The ID of the commit that is the tip of the branch where you want to create - // the commit that will delete the file. This must be the HEAD commit for the - // branch. The commit that deletes the file will be created from this commit - // ID. + // the commit that deletes the file. This must be the HEAD commit for the branch. + // The commit that deletes the file is created from this commit ID. // // ParentCommitId is a required field ParentCommitId *string `locationName:"parentCommitId" type:"string" required:"true"` @@ -110,8 +109,8 @@ type DeleteFileOutput struct { // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The fully-qualified path to the file that will be deleted, including the - // full name and extension of that file. + // The fully qualified path to the file to be deleted, including the full name + // and extension of that file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` @@ -134,8 +133,8 @@ const opDeleteFile = "DeleteFile" // AWS CodeCommit. // // Deletes a specified file from a specified branch. A commit is created on -// the branch that contains the revision. The file will still exist in the commits -// prior to the commit that contains the deletion. +// the branch that contains the revision. The file still exists in the commits +// earlier to the commit that contains the deletion. // // // Example sending a request using DeleteFileRequest. // req := client.DeleteFileRequest(params) diff --git a/service/codecommit/api_op_DeletePullRequestApprovalRule.go b/service/codecommit/api_op_DeletePullRequestApprovalRule.go new file mode 100644 index 00000000000..78cb776328b --- /dev/null +++ b/service/codecommit/api_op_DeletePullRequestApprovalRule.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeletePullRequestApprovalRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule you want to delete. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request that contains the approval rule + // you want to delete. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePullRequestApprovalRuleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePullRequestApprovalRuleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeletePullRequestApprovalRuleInput"} + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletePullRequestApprovalRuleOutput struct { + _ struct{} `type:"structure"` + + // The ID of the deleted approval rule. + // + // If the approval rule was deleted in an earlier API call, the response is + // 200 OK without content. + // + // ApprovalRuleId is a required field + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePullRequestApprovalRuleOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeletePullRequestApprovalRule = "DeletePullRequestApprovalRule" + +// DeletePullRequestApprovalRuleRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Deletes an approval rule from a specified pull request. Approval rules can +// be deleted from a pull request only if the pull request is open, and if the +// approval rule was created specifically for a pull request and not generated +// from an approval rule template associated with the repository where the pull +// request was created. You cannot delete an approval rule from a merged or +// closed pull request. +// +// // Example sending a request using DeletePullRequestApprovalRuleRequest. +// req := client.DeletePullRequestApprovalRuleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DeletePullRequestApprovalRule +func (c *Client) DeletePullRequestApprovalRuleRequest(input *DeletePullRequestApprovalRuleInput) DeletePullRequestApprovalRuleRequest { + op := &aws.Operation{ + Name: opDeletePullRequestApprovalRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePullRequestApprovalRuleInput{} + } + + req := c.newRequest(op, input, &DeletePullRequestApprovalRuleOutput{}) + return DeletePullRequestApprovalRuleRequest{Request: req, Input: input, Copy: c.DeletePullRequestApprovalRuleRequest} +} + +// DeletePullRequestApprovalRuleRequest is the request type for the +// DeletePullRequestApprovalRule API operation. +type DeletePullRequestApprovalRuleRequest struct { + *aws.Request + Input *DeletePullRequestApprovalRuleInput + Copy func(*DeletePullRequestApprovalRuleInput) DeletePullRequestApprovalRuleRequest +} + +// Send marshals and sends the DeletePullRequestApprovalRule API request. +func (r DeletePullRequestApprovalRuleRequest) Send(ctx context.Context) (*DeletePullRequestApprovalRuleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeletePullRequestApprovalRuleResponse{ + DeletePullRequestApprovalRuleOutput: r.Request.Data.(*DeletePullRequestApprovalRuleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeletePullRequestApprovalRuleResponse is the response type for the +// DeletePullRequestApprovalRule API operation. 
+type DeletePullRequestApprovalRuleResponse struct { + *DeletePullRequestApprovalRuleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeletePullRequestApprovalRule request. +func (r *DeletePullRequestApprovalRuleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_DeleteRepository.go b/service/codecommit/api_op_DeleteRepository.go index c888f0eb147..ae95e8297ee 100644 --- a/service/codecommit/api_op_DeleteRepository.go +++ b/service/codecommit/api_op_DeleteRepository.go @@ -60,11 +60,11 @@ const opDeleteRepository = "DeleteRepository" // AWS CodeCommit. // // Deletes a repository. If a specified repository was already deleted, a null -// repository ID will be returned. +// repository ID is returned. // // Deleting a repository also deletes all associated objects and metadata. After // a repository is deleted, all future push calls to the deleted repository -// will fail. +// fail. // // // Example sending a request using DeleteRepositoryRequest. // req := client.DeleteRepositoryRequest(params) diff --git a/service/codecommit/api_op_DescribeMergeConflicts.go b/service/codecommit/api_op_DescribeMergeConflicts.go index 21261eac651..ffde20b5e61 100644 --- a/service/codecommit/api_op_DescribeMergeConflicts.go +++ b/service/codecommit/api_op_DescribeMergeConflicts.go @@ -13,20 +13,19 @@ type DescribeMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -44,7 +43,7 @@ type DescribeMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. 
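// A sketch of removing an approval rule from an open pull request with the
// DeletePullRequestApprovalRule operation defined above, assuming svc is a
// *codecommit.Client. As documented above, this applies only to rules created directly
// on an open pull request, not to rules generated from an associated template. The IDs
// and names are placeholders.
//
//    func deletePullRequestApprovalRule(ctx context.Context, svc *codecommit.Client, pullRequestID string) error {
//        req := svc.DeletePullRequestApprovalRuleRequest(&codecommit.DeletePullRequestApprovalRuleInput{
//            PullRequestId:    aws.String(pullRequestID),
//            ApprovalRuleName: aws.String("require-team-lead"),
//        })
//        resp, err := req.Send(ctx)
//        if err != nil {
//            return err
//        }
//        fmt.Println(resp)
//        return nil
//    }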
NextToken *string `locationName:"nextToken" type:"string"` @@ -55,7 +54,7 @@ type DescribeMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -143,7 +142,7 @@ const opDescribeMergeConflicts = "DescribeMergeConflicts" // Returns information about one or more merge conflicts in the attempted merge // of two commit specifiers using the squash or three-way merge strategy. If // the merge option for the attempted merge is specified as FAST_FORWARD_MERGE, -// an exception will be thrown. +// an exception is thrown. // // // Example sending a request using DescribeMergeConflictsRequest. // req := client.DescribeMergeConflictsRequest(params) diff --git a/service/codecommit/api_op_DescribePullRequestEvents.go b/service/codecommit/api_op_DescribePullRequestEvents.go index a3f0c946cbd..41b37a20b2f 100644 --- a/service/codecommit/api_op_DescribePullRequestEvents.go +++ b/service/codecommit/api_op_DescribePullRequestEvents.go @@ -13,16 +13,16 @@ type DescribePullRequestEventsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the user whose actions resulted in the - // event. Examples include updating the pull request with additional commits - // or changing the status of a pull request. + // event. Examples include updating the pull request with more commits or changing + // the status of a pull request. ActorArn *string `locationName:"actorArn" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 events, which is also the maximum number of events that can - // be returned in a result. + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 events, which is also the maximum number of events that + // can be returned in a result. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go b/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go new file mode 100644 index 00000000000..c6b92516b48 --- /dev/null +++ b/service/codecommit/api_op_DisassociateApprovalRuleTemplateFromRepository.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type DisassociateApprovalRuleTemplateFromRepositoryInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template to disassociate from a specified repository. 
+ // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The name of the repository you want to disassociate from the template. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateApprovalRuleTemplateFromRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateApprovalRuleTemplateFromRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DisassociateApprovalRuleTemplateFromRepositoryInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisassociateApprovalRuleTemplateFromRepositoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateApprovalRuleTemplateFromRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opDisassociateApprovalRuleTemplateFromRepository = "DisassociateApprovalRuleTemplateFromRepository" + +// DisassociateApprovalRuleTemplateFromRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Removes the association between a template and a repository so that approval +// rules based on the template are not automatically created when pull requests +// are created in the specified repository. This does not delete any approval +// rules previously created for pull requests through the template association. +// +// // Example sending a request using DisassociateApprovalRuleTemplateFromRepositoryRequest. 
+// req := client.DisassociateApprovalRuleTemplateFromRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/DisassociateApprovalRuleTemplateFromRepository +func (c *Client) DisassociateApprovalRuleTemplateFromRepositoryRequest(input *DisassociateApprovalRuleTemplateFromRepositoryInput) DisassociateApprovalRuleTemplateFromRepositoryRequest { + op := &aws.Operation{ + Name: opDisassociateApprovalRuleTemplateFromRepository, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateApprovalRuleTemplateFromRepositoryInput{} + } + + req := c.newRequest(op, input, &DisassociateApprovalRuleTemplateFromRepositoryOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DisassociateApprovalRuleTemplateFromRepositoryRequest{Request: req, Input: input, Copy: c.DisassociateApprovalRuleTemplateFromRepositoryRequest} +} + +// DisassociateApprovalRuleTemplateFromRepositoryRequest is the request type for the +// DisassociateApprovalRuleTemplateFromRepository API operation. +type DisassociateApprovalRuleTemplateFromRepositoryRequest struct { + *aws.Request + Input *DisassociateApprovalRuleTemplateFromRepositoryInput + Copy func(*DisassociateApprovalRuleTemplateFromRepositoryInput) DisassociateApprovalRuleTemplateFromRepositoryRequest +} + +// Send marshals and sends the DisassociateApprovalRuleTemplateFromRepository API request. +func (r DisassociateApprovalRuleTemplateFromRepositoryRequest) Send(ctx context.Context) (*DisassociateApprovalRuleTemplateFromRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DisassociateApprovalRuleTemplateFromRepositoryResponse{ + DisassociateApprovalRuleTemplateFromRepositoryOutput: r.Request.Data.(*DisassociateApprovalRuleTemplateFromRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DisassociateApprovalRuleTemplateFromRepositoryResponse is the response type for the +// DisassociateApprovalRuleTemplateFromRepository API operation. +type DisassociateApprovalRuleTemplateFromRepositoryResponse struct { + *DisassociateApprovalRuleTemplateFromRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DisassociateApprovalRuleTemplateFromRepository request. +func (r *DisassociateApprovalRuleTemplateFromRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go b/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go new file mode 100644 index 00000000000..dcb4008ae03 --- /dev/null +++ b/service/codecommit/api_op_EvaluatePullRequestApprovalRules.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type EvaluatePullRequestApprovalRulesInput struct { + _ struct{} `type:"structure"` + + // The system-generated ID of the pull request you want to evaluate. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID for the pull request revision. 
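// A sketch of removing the association between a repository and an approval rule template
// with the DisassociateApprovalRuleTemplateFromRepository operation defined above,
// assuming svc is a *codecommit.Client; the names are placeholders.
//
//    func disassociateTemplate(ctx context.Context, svc *codecommit.Client) error {
//        req := svc.DisassociateApprovalRuleTemplateFromRepositoryRequest(&codecommit.DisassociateApprovalRuleTemplateFromRepositoryInput{
//            ApprovalRuleTemplateName: aws.String("require-two-approvals"),
//            RepositoryName:           aws.String("MyDemoRepo"),
//        })
//        // The output carries no fields; the generated request discards the response body.
//        if _, err := req.Send(ctx); err != nil {
//            return err
//        }
//        return nil
//    }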
To retrieve the most + // recent revision ID for a pull request, use GetPullRequest. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EvaluatePullRequestApprovalRulesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EvaluatePullRequestApprovalRulesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EvaluatePullRequestApprovalRulesInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EvaluatePullRequestApprovalRulesOutput struct { + _ struct{} `type:"structure"` + + // The result of the evaluation, including the names of the rules whose conditions + // have been met (if any), the names of the rules whose conditions have not + // been met (if any), whether the pull request is in the approved state, and + // whether the pull request approval rule has been set aside by an override. + // + // Evaluation is a required field + Evaluation *Evaluation `locationName:"evaluation" type:"structure" required:"true"` +} + +// String returns the string representation +func (s EvaluatePullRequestApprovalRulesOutput) String() string { + return awsutil.Prettify(s) +} + +const opEvaluatePullRequestApprovalRules = "EvaluatePullRequestApprovalRules" + +// EvaluatePullRequestApprovalRulesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Evaluates whether a pull request has met all the conditions specified in +// its associated approval rules. +// +// // Example sending a request using EvaluatePullRequestApprovalRulesRequest. +// req := client.EvaluatePullRequestApprovalRulesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/EvaluatePullRequestApprovalRules +func (c *Client) EvaluatePullRequestApprovalRulesRequest(input *EvaluatePullRequestApprovalRulesInput) EvaluatePullRequestApprovalRulesRequest { + op := &aws.Operation{ + Name: opEvaluatePullRequestApprovalRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EvaluatePullRequestApprovalRulesInput{} + } + + req := c.newRequest(op, input, &EvaluatePullRequestApprovalRulesOutput{}) + return EvaluatePullRequestApprovalRulesRequest{Request: req, Input: input, Copy: c.EvaluatePullRequestApprovalRulesRequest} +} + +// EvaluatePullRequestApprovalRulesRequest is the request type for the +// EvaluatePullRequestApprovalRules API operation. +type EvaluatePullRequestApprovalRulesRequest struct { + *aws.Request + Input *EvaluatePullRequestApprovalRulesInput + Copy func(*EvaluatePullRequestApprovalRulesInput) EvaluatePullRequestApprovalRulesRequest +} + +// Send marshals and sends the EvaluatePullRequestApprovalRules API request. 
+func (r EvaluatePullRequestApprovalRulesRequest) Send(ctx context.Context) (*EvaluatePullRequestApprovalRulesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &EvaluatePullRequestApprovalRulesResponse{ + EvaluatePullRequestApprovalRulesOutput: r.Request.Data.(*EvaluatePullRequestApprovalRulesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// EvaluatePullRequestApprovalRulesResponse is the response type for the +// EvaluatePullRequestApprovalRules API operation. +type EvaluatePullRequestApprovalRulesResponse struct { + *EvaluatePullRequestApprovalRulesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// EvaluatePullRequestApprovalRules request. +func (r *EvaluatePullRequestApprovalRulesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetApprovalRuleTemplate.go b/service/codecommit/api_op_GetApprovalRuleTemplate.go new file mode 100644 index 00000000000..26457822202 --- /dev/null +++ b/service/codecommit/api_op_GetApprovalRuleTemplate.go @@ -0,0 +1,123 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template for which you want to get information. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // The content and structure of the approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetApprovalRuleTemplate = "GetApprovalRuleTemplate" + +// GetApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Returns information about a specified approval rule template. +// +// // Example sending a request using GetApprovalRuleTemplateRequest. 
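// A sketch of checking whether a pull request revision satisfies its approval rules with
// the EvaluatePullRequestApprovalRules operation defined above, assuming svc is a
// *codecommit.Client. As the documentation notes, the revision ID would normally come
// from GetPullRequest; here both IDs are passed in as placeholders.
//
//    func evaluateApprovalRules(ctx context.Context, svc *codecommit.Client, pullRequestID, revisionID string) error {
//        req := svc.EvaluatePullRequestApprovalRulesRequest(&codecommit.EvaluatePullRequestApprovalRulesInput{
//            PullRequestId: aws.String(pullRequestID),
//            RevisionId:    aws.String(revisionID),
//        })
//        resp, err := req.Send(ctx)
//        if err != nil {
//            return err
//        }
//        // Evaluation reports which rule conditions were met, which were not, whether the
//        // pull request is approved, and whether approval was overridden.
//        fmt.Println(resp.Evaluation)
//        return nil
//    }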
+// req := client.GetApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetApprovalRuleTemplate +func (c *Client) GetApprovalRuleTemplateRequest(input *GetApprovalRuleTemplateInput) GetApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opGetApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &GetApprovalRuleTemplateOutput{}) + return GetApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.GetApprovalRuleTemplateRequest} +} + +// GetApprovalRuleTemplateRequest is the request type for the +// GetApprovalRuleTemplate API operation. +type GetApprovalRuleTemplateRequest struct { + *aws.Request + Input *GetApprovalRuleTemplateInput + Copy func(*GetApprovalRuleTemplateInput) GetApprovalRuleTemplateRequest +} + +// Send marshals and sends the GetApprovalRuleTemplate API request. +func (r GetApprovalRuleTemplateRequest) Send(ctx context.Context) (*GetApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetApprovalRuleTemplateResponse{ + GetApprovalRuleTemplateOutput: r.Request.Data.(*GetApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetApprovalRuleTemplateResponse is the response type for the +// GetApprovalRuleTemplate API operation. +type GetApprovalRuleTemplateResponse struct { + *GetApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetApprovalRuleTemplate request. +func (r *GetApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetBlob.go b/service/codecommit/api_op_GetBlob.go index 44c45b181f1..d2501fc0b8f 100644 --- a/service/codecommit/api_op_GetBlob.go +++ b/service/codecommit/api_op_GetBlob.go @@ -72,7 +72,7 @@ const opGetBlob = "GetBlob" // GetBlobRequest returns a request value for making API operation for // AWS CodeCommit. // -// Returns the base-64 encoded content of an individual blob within a repository. +// Returns the base-64 encoded content of an individual blob in a repository. // // // Example sending a request using GetBlobRequest. // req := client.GetBlobRequest(params) diff --git a/service/codecommit/api_op_GetCommentsForComparedCommit.go b/service/codecommit/api_op_GetCommentsForComparedCommit.go index 922f8c9042c..6c7492d9d42 100644 --- a/service/codecommit/api_op_GetCommentsForComparedCommit.go +++ b/service/codecommit/api_op_GetCommentsForComparedCommit.go @@ -13,17 +13,17 @@ type GetCommentsForComparedCommitInput struct { _ struct{} `type:"structure"` // To establish the directionality of the comparison, the full commit ID of - // the 'after' commit. + // the after commit. // // AfterCommitId is a required field AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` // To establish the directionality of the comparison, the full commit ID of - // the 'before' commit. + // the before commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments, and is configurable up to 500. 
+ // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 comments, but you can configure up to 500. MaxResults *int64 `locationName:"maxResults" type:"integer"` // An enumeration token that when provided in a request, returns the next batch diff --git a/service/codecommit/api_op_GetCommentsForPullRequest.go b/service/codecommit/api_op_GetCommentsForPullRequest.go index 7bce7bcc34d..d604ed8a15a 100644 --- a/service/codecommit/api_op_GetCommentsForPullRequest.go +++ b/service/codecommit/api_op_GetCommentsForPullRequest.go @@ -20,12 +20,12 @@ type GetCommentsForPullRequestInput struct { // of the branch at the time the pull request was created. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A non-negative integer used to limit the number of returned results. The - // default is 100 comments. You can return up to 500 comments with a single + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is 100 comments. You can return up to 500 comments with a single // request. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_GetCommit.go b/service/codecommit/api_op_GetCommit.go index 931cf68397f..683cd321174 100644 --- a/service/codecommit/api_op_GetCommit.go +++ b/service/codecommit/api_op_GetCommit.go @@ -13,7 +13,7 @@ import ( type GetCommitInput struct { _ struct{} `type:"structure"` - // The commit ID. Commit IDs are the full SHA of the commit. + // The commit ID. Commit IDs are the full SHA ID of the commit. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` diff --git a/service/codecommit/api_op_GetDifferences.go b/service/codecommit/api_op_GetDifferences.go index b75951d0ed1..a97b659c4c4 100644 --- a/service/codecommit/api_op_GetDifferences.go +++ b/service/codecommit/api_op_GetDifferences.go @@ -20,26 +20,26 @@ type GetDifferencesInput struct { // The file path in which to check differences. Limits the results to this path. // Can also be used to specify the changed name of a directory or folder, if - // it has changed. If not specified, differences will be shown for all paths. + // it has changed. If not specified, differences are shown for all paths. AfterPath *string `locationName:"afterPath" type:"string"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, the full commit ID. Optional. If not specified, all - // changes prior to the afterCommitSpecifier value will be shown. If you do - // not use beforeCommitSpecifier in your request, consider limiting the results - // with maxResults. + // a commit (for example, the full commit ID). Optional. If not specified, all + // changes before the afterCommitSpecifier value are shown. If you do not use + // beforeCommitSpecifier in your request, consider limiting the results with + // maxResults. BeforeCommitSpecifier *string `locationName:"beforeCommitSpecifier" type:"string"` // The file path in which to check for differences. Limits the results to this // path. Can also be used to specify the previous name of a directory or folder. 
- // If beforePath and afterPath are not specified, differences will be shown - // for all paths. + // If beforePath and afterPath are not specified, differences are shown for + // all paths. BeforePath *string `locationName:"beforePath" type:"string"` - // A non-negative integer used to limit the number of returned results. + // A non-zero, non-negative integer used to limit the number of returned results. MaxResults *int64 `type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `type:"string"` @@ -78,8 +78,8 @@ func (s *GetDifferencesInput) Validate() error { type GetDifferencesOutput struct { _ struct{} `type:"structure"` - // A differences data type object that contains information about the differences, - // including whether the difference is added, modified, or deleted (A, D, M). + // A data type object that contains information about the differences, including + // whether the difference is added, modified, or deleted (A, D, M). Differences []Difference `locationName:"differences" type:"list"` // An enumeration token that can be used in a request to return the next batch @@ -98,7 +98,7 @@ const opGetDifferences = "GetDifferences" // AWS CodeCommit. // // Returns information about the differences in a valid commit specifier (such -// as a branch, tag, HEAD, commit ID or other fully qualified reference). Results +// as a branch, tag, HEAD, commit ID, or other fully qualified reference). Results // can be limited to a specified path. // // // Example sending a request using GetDifferencesRequest. diff --git a/service/codecommit/api_op_GetFile.go b/service/codecommit/api_op_GetFile.go index 712d4fb715b..c2c3c92d78a 100644 --- a/service/codecommit/api_op_GetFile.go +++ b/service/codecommit/api_op_GetFile.go @@ -12,14 +12,14 @@ import ( type GetFileInput struct { _ struct{} `type:"structure"` - // The fully-quaified reference that identifies the commit that contains the - // file. For example, you could specify a full commit ID, a tag, a branch name, - // or a reference such as refs/heads/master. If none is provided, then the head - // commit will be used. + // The fully quaified reference that identifies the commit that contains the + // file. For example, you can specify a full commit ID, a tag, a branch name, + // or a reference such as refs/heads/master. If none is provided, the head commit + // is used. CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` - // The fully-qualified path to the file, including the full name and extension - // of the file. For example, /examples/file.md is the fully-qualified path to + // The fully qualified path to the file, including the full name and extension + // of the file. For example, /examples/file.md is the fully qualified path to // a file named file.md in a folder named examples. // // FilePath is a required field @@ -83,13 +83,13 @@ type GetFileOutput struct { // // The file mode permissions returned by this API are not the standard file // mode permission values, such as 100644, but rather extrapolated values. See - // below for a full list of supported return values. + // the supported return values. // // FileMode is a required field FileMode FileModeTypeEnum `locationName:"fileMode" type:"string" required:"true" enum:"true"` - // The fully qualified path to the specified file. This returns the name and - // extension of the file. 
+ // The fully qualified path to the specified file. Returns the name and extension + // of the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` diff --git a/service/codecommit/api_op_GetFolder.go b/service/codecommit/api_op_GetFolder.go index e8fd0cc9629..12f0cb84690 100644 --- a/service/codecommit/api_op_GetFolder.go +++ b/service/codecommit/api_op_GetFolder.go @@ -12,13 +12,13 @@ import ( type GetFolderInput struct { _ struct{} `type:"structure"` - // A fully-qualified reference used to identify a commit that contains the version - // of the folder's content to return. A fully-qualified reference can be a commit + // A fully qualified reference used to identify a commit that contains the version + // of the folder's content to return. A fully qualified reference can be a commit // ID, branch name, tag, or reference such as HEAD. If no specifier is provided, - // the folder content will be returned as it exists in the HEAD commit. + // the folder content is returned as it exists in the HEAD commit. CommitSpecifier *string `locationName:"commitSpecifier" type:"string"` - // The fully-qualified path to the folder whose contents will be returned, including + // The fully qualified path to the folder whose contents are returned, including // the folder name. For example, /examples is a fully-qualified path to a folder // named examples that was created off of the root directory (/) of a repository. // @@ -60,28 +60,28 @@ func (s *GetFolderInput) Validate() error { type GetFolderOutput struct { _ struct{} `type:"structure"` - // The full commit ID used as a reference for which version of the folder content - // is returned. + // The full commit ID used as a reference for the returned version of the folder + // content. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // The list of files that exist in the specified folder, if any. + // The list of files in the specified folder, if any. Files []File `locationName:"files" type:"list"` - // The fully-qualified path of the folder whose contents are returned. + // The fully qualified path of the folder whose contents are returned. // // FolderPath is a required field FolderPath *string `locationName:"folderPath" type:"string" required:"true"` - // The list of folders that exist beneath the specified folder, if any. + // The list of folders that exist under the specified folder, if any. SubFolders []Folder `locationName:"subFolders" type:"list"` - // The list of submodules that exist in the specified folder, if any. + // The list of submodules in the specified folder, if any. SubModules []SubModule `locationName:"subModules" type:"list"` - // The list of symbolic links to other files and folders that exist in the specified - // folder, if any. + // The list of symbolic links to other files and folders in the specified folder, + // if any. SymbolicLinks []SymbolicLink `locationName:"symbolicLinks" type:"list"` // The full SHA-1 pointer of the tree information for the commit that contains diff --git a/service/codecommit/api_op_GetMergeCommit.go b/service/codecommit/api_op_GetMergeCommit.go index ff1847d0c84..19cf6564a4f 100644 --- a/service/codecommit/api_op_GetMergeCommit.go +++ b/service/codecommit/api_op_GetMergeCommit.go @@ -13,20 +13,19 @@ type GetMergeCommitInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. 
If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -38,7 +37,7 @@ type GetMergeCommitInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -86,7 +85,7 @@ type GetMergeCommitOutput struct { // The commit ID for the merge commit created when the source branch was merged // into the destination branch. If the fast-forward merge strategy was used, - // no merge commit exists. + // there is no merge commit. MergedCommitId *string `locationName:"mergedCommitId" type:"string"` // The commit ID of the source commit specifier that was used in the merge evaluation. diff --git a/service/codecommit/api_op_GetMergeConflicts.go b/service/codecommit/api_op_GetMergeConflicts.go index 71cb774eed6..557688c3738 100644 --- a/service/codecommit/api_op_GetMergeConflicts.go +++ b/service/codecommit/api_op_GetMergeConflicts.go @@ -13,20 +13,19 @@ type GetMergeConflictsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. 
The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -39,7 +38,7 @@ type GetMergeConflictsInput struct { // MergeOption is a required field MergeOption MergeOptionTypeEnum `locationName:"mergeOption" type:"string" required:"true" enum:"true"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -49,7 +48,7 @@ type GetMergeConflictsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -95,7 +94,7 @@ type GetMergeConflictsOutput struct { BaseCommitId *string `locationName:"baseCommitId" type:"string"` // A list of metadata for any conflicting files. If the specified merge strategy - // is FAST_FORWARD_MERGE, this list will always be empty. + // is FAST_FORWARD_MERGE, this list is always empty. // // ConflictMetadataList is a required field ConflictMetadataList []ConflictMetadata `locationName:"conflictMetadataList" type:"list" required:"true"` diff --git a/service/codecommit/api_op_GetMergeOptions.go b/service/codecommit/api_op_GetMergeOptions.go index 5852fbb6a7e..e0156080d4e 100644 --- a/service/codecommit/api_op_GetMergeOptions.go +++ b/service/codecommit/api_op_GetMergeOptions.go @@ -13,20 +13,19 @@ type GetMergeOptionsInput struct { _ struct{} `type:"structure"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. 
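// As a rough illustration of how the two settings documented above combine, a
// GetMergeOptions call could ask for line-level conflict detection while accepting
// the source version of any conflicting file. This is a hedged sketch, not generated
// code: the enum string values come from the field documentation, the repository and
// branch names are placeholders, and a configured client plus the aws.String helper
// are assumed.
//
//    req := client.GetMergeOptionsRequest(&GetMergeOptionsInput{
//        RepositoryName:             aws.String("MyDemoRepo"),
//        SourceCommitSpecifier:      aws.String("feature-branch"),
//        DestinationCommitSpecifier: aws.String("master"),
//        ConflictDetailLevel:        ConflictDetailLevelTypeEnum("LINE_LEVEL"),
//        ConflictResolutionStrategy: ConflictResolutionStrategyTypeEnum("ACCEPT_SOURCE"),
//    })
//    resp, err := req.Send(context.TODO())
//    if err == nil {
//        fmt.Println(resp)
//    }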
ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -38,7 +37,7 @@ type GetMergeOptionsInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` @@ -110,8 +109,8 @@ const opGetMergeOptions = "GetMergeOptions" // AWS CodeCommit. // // Returns information about the merge options available for merging two specified -// branches. For details about why a particular merge option is not available, -// use GetMergeConflicts or DescribeMergeConflicts. +// branches. For details about why a merge option is not available, use GetMergeConflicts +// or DescribeMergeConflicts. // // // Example sending a request using GetMergeOptionsRequest. // req := client.GetMergeOptionsRequest(params) diff --git a/service/codecommit/api_op_GetPullRequestApprovalStates.go b/service/codecommit/api_op_GetPullRequestApprovalStates.go new file mode 100644 index 00000000000..3eb27c1898d --- /dev/null +++ b/service/codecommit/api_op_GetPullRequestApprovalStates.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetPullRequestApprovalStatesInput struct { + _ struct{} `type:"structure"` + + // The system-generated ID for the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID for the pull request revision. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPullRequestApprovalStatesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPullRequestApprovalStatesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPullRequestApprovalStatesInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetPullRequestApprovalStatesOutput struct { + _ struct{} `type:"structure"` + + // Information about users who have approved the pull request. 
+ Approvals []Approval `locationName:"approvals" type:"list"` +} + +// String returns the string representation +func (s GetPullRequestApprovalStatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPullRequestApprovalStates = "GetPullRequestApprovalStates" + +// GetPullRequestApprovalStatesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Gets information about the approval states for a specified pull request. +// Approval states only apply to pull requests that have one or more approval +// rules applied to them. +// +// // Example sending a request using GetPullRequestApprovalStatesRequest. +// req := client.GetPullRequestApprovalStatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequestApprovalStates +func (c *Client) GetPullRequestApprovalStatesRequest(input *GetPullRequestApprovalStatesInput) GetPullRequestApprovalStatesRequest { + op := &aws.Operation{ + Name: opGetPullRequestApprovalStates, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPullRequestApprovalStatesInput{} + } + + req := c.newRequest(op, input, &GetPullRequestApprovalStatesOutput{}) + return GetPullRequestApprovalStatesRequest{Request: req, Input: input, Copy: c.GetPullRequestApprovalStatesRequest} +} + +// GetPullRequestApprovalStatesRequest is the request type for the +// GetPullRequestApprovalStates API operation. +type GetPullRequestApprovalStatesRequest struct { + *aws.Request + Input *GetPullRequestApprovalStatesInput + Copy func(*GetPullRequestApprovalStatesInput) GetPullRequestApprovalStatesRequest +} + +// Send marshals and sends the GetPullRequestApprovalStates API request. +func (r GetPullRequestApprovalStatesRequest) Send(ctx context.Context) (*GetPullRequestApprovalStatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPullRequestApprovalStatesResponse{ + GetPullRequestApprovalStatesOutput: r.Request.Data.(*GetPullRequestApprovalStatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPullRequestApprovalStatesResponse is the response type for the +// GetPullRequestApprovalStates API operation. +type GetPullRequestApprovalStatesResponse struct { + *GetPullRequestApprovalStatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPullRequestApprovalStates request. +func (r *GetPullRequestApprovalStatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetPullRequestOverrideState.go b/service/codecommit/api_op_GetPullRequestOverrideState.go new file mode 100644 index 00000000000..08b73aa4015 --- /dev/null +++ b/service/codecommit/api_op_GetPullRequestOverrideState.go @@ -0,0 +1,136 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetPullRequestOverrideStateInput struct { + _ struct{} `type:"structure"` + + // The ID of the pull request for which you want to get information about whether + // approval rules have been set aside (overridden). 
+ // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the revision for the pull request. To retrieve + // the most recent revision ID, use GetPullRequest. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPullRequestOverrideStateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPullRequestOverrideStateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPullRequestOverrideStateInput"} + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetPullRequestOverrideStateOutput struct { + _ struct{} `type:"structure"` + + // A Boolean value that indicates whether a pull request has had its rules set + // aside (TRUE) or whether all approval rules still apply (FALSE). + Overridden *bool `locationName:"overridden" type:"boolean"` + + // The Amazon Resource Name (ARN) of the user or identity that overrode the + // rules and their requirements for the pull request. + Overrider *string `locationName:"overrider" type:"string"` +} + +// String returns the string representation +func (s GetPullRequestOverrideStateOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPullRequestOverrideState = "GetPullRequestOverrideState" + +// GetPullRequestOverrideStateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Returns information about whether approval rules have been set aside (overridden) +// for a pull request, and if so, the Amazon Resource Name (ARN) of the user +// or identity that overrode the rules and their requirements for the pull request. +// +// // Example sending a request using GetPullRequestOverrideStateRequest. +// req := client.GetPullRequestOverrideStateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetPullRequestOverrideState +func (c *Client) GetPullRequestOverrideStateRequest(input *GetPullRequestOverrideStateInput) GetPullRequestOverrideStateRequest { + op := &aws.Operation{ + Name: opGetPullRequestOverrideState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPullRequestOverrideStateInput{} + } + + req := c.newRequest(op, input, &GetPullRequestOverrideStateOutput{}) + return GetPullRequestOverrideStateRequest{Request: req, Input: input, Copy: c.GetPullRequestOverrideStateRequest} +} + +// GetPullRequestOverrideStateRequest is the request type for the +// GetPullRequestOverrideState API operation. +type GetPullRequestOverrideStateRequest struct { + *aws.Request + Input *GetPullRequestOverrideStateInput + Copy func(*GetPullRequestOverrideStateInput) GetPullRequestOverrideStateRequest +} + +// Send marshals and sends the GetPullRequestOverrideState API request. 
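// A short, hypothetical sketch of the request/Send pattern for this operation,
// following the example convention used in the doc comments above. The pull
// request ID and revision ID are placeholder values, and a configured client
// plus the aws.String, aws.BoolValue, and aws.StringValue helpers are assumed.
//
//    req := client.GetPullRequestOverrideStateRequest(&GetPullRequestOverrideStateInput{
//        PullRequestId: aws.String("42"),
//        RevisionId:    aws.String("EXAMPLE-REVISION-ID"),
//    })
//    resp, err := req.Send(context.TODO())
//    if err == nil && aws.BoolValue(resp.Overridden) {
//        fmt.Println("approval rules overridden by", aws.StringValue(resp.Overrider))
//    }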
+func (r GetPullRequestOverrideStateRequest) Send(ctx context.Context) (*GetPullRequestOverrideStateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPullRequestOverrideStateResponse{ + GetPullRequestOverrideStateOutput: r.Request.Data.(*GetPullRequestOverrideStateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPullRequestOverrideStateResponse is the response type for the +// GetPullRequestOverrideState API operation. +type GetPullRequestOverrideStateResponse struct { + *GetPullRequestOverrideStateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPullRequestOverrideState request. +func (r *GetPullRequestOverrideStateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_GetRepository.go b/service/codecommit/api_op_GetRepository.go index 9f13e0052c1..adcd6fb69e7 100644 --- a/service/codecommit/api_op_GetRepository.go +++ b/service/codecommit/api_op_GetRepository.go @@ -63,9 +63,9 @@ const opGetRepository = "GetRepository" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. // // // Example sending a request using GetRepositoryRequest. // req := client.GetRepositoryRequest(params) diff --git a/service/codecommit/api_op_ListApprovalRuleTemplates.go b/service/codecommit/api_op_ListApprovalRuleTemplates.go new file mode 100644 index 00000000000..350b8cc57ee --- /dev/null +++ b/service/codecommit/api_op_ListApprovalRuleTemplates.go @@ -0,0 +1,166 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListApprovalRuleTemplatesInput struct { + _ struct{} `type:"structure"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApprovalRuleTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +type ListApprovalRuleTemplatesOutput struct { + _ struct{} `type:"structure"` + + // The names of all the approval rule templates found in the AWS Region for + // your AWS account. + ApprovalRuleTemplateNames []string `locationName:"approvalRuleTemplateNames" type:"list"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListApprovalRuleTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListApprovalRuleTemplates = "ListApprovalRuleTemplates" + +// ListApprovalRuleTemplatesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Lists all approval rule templates in the specified AWS Region in your AWS +// account. If an AWS Region is not specified, the AWS Region where you are +// signed in is used. +// +// // Example sending a request using ListApprovalRuleTemplatesRequest. +// req := client.ListApprovalRuleTemplatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListApprovalRuleTemplates +func (c *Client) ListApprovalRuleTemplatesRequest(input *ListApprovalRuleTemplatesInput) ListApprovalRuleTemplatesRequest { + op := &aws.Operation{ + Name: opListApprovalRuleTemplates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListApprovalRuleTemplatesInput{} + } + + req := c.newRequest(op, input, &ListApprovalRuleTemplatesOutput{}) + return ListApprovalRuleTemplatesRequest{Request: req, Input: input, Copy: c.ListApprovalRuleTemplatesRequest} +} + +// ListApprovalRuleTemplatesRequest is the request type for the +// ListApprovalRuleTemplates API operation. +type ListApprovalRuleTemplatesRequest struct { + *aws.Request + Input *ListApprovalRuleTemplatesInput + Copy func(*ListApprovalRuleTemplatesInput) ListApprovalRuleTemplatesRequest +} + +// Send marshals and sends the ListApprovalRuleTemplates API request. +func (r ListApprovalRuleTemplatesRequest) Send(ctx context.Context) (*ListApprovalRuleTemplatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListApprovalRuleTemplatesResponse{ + ListApprovalRuleTemplatesOutput: r.Request.Data.(*ListApprovalRuleTemplatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListApprovalRuleTemplatesRequestPaginator returns a paginator for ListApprovalRuleTemplates. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListApprovalRuleTemplatesRequest(input) +// p := codecommit.NewListApprovalRuleTemplatesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListApprovalRuleTemplatesPaginator(req ListApprovalRuleTemplatesRequest) ListApprovalRuleTemplatesPaginator { + return ListApprovalRuleTemplatesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListApprovalRuleTemplatesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListApprovalRuleTemplatesPaginator is used to paginate the request. 
This can be done by +// calling Next and CurrentPage. +type ListApprovalRuleTemplatesPaginator struct { + aws.Pager +} + +func (p *ListApprovalRuleTemplatesPaginator) CurrentPage() *ListApprovalRuleTemplatesOutput { + return p.Pager.CurrentPage().(*ListApprovalRuleTemplatesOutput) +} + +// ListApprovalRuleTemplatesResponse is the response type for the +// ListApprovalRuleTemplates API operation. +type ListApprovalRuleTemplatesResponse struct { + *ListApprovalRuleTemplatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListApprovalRuleTemplates request. +func (r *ListApprovalRuleTemplatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go b/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go new file mode 100644 index 00000000000..efa81245b9d --- /dev/null +++ b/service/codecommit/api_op_ListAssociatedApprovalRuleTemplatesForRepository.go @@ -0,0 +1,186 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListAssociatedApprovalRuleTemplatesForRepositoryInput struct { + _ struct{} `type:"structure"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the repository for which you want to list all associated approval + // rule templates. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAssociatedApprovalRuleTemplatesForRepositoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAssociatedApprovalRuleTemplatesForRepositoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAssociatedApprovalRuleTemplatesForRepositoryInput"} + + if s.RepositoryName == nil { + invalidParams.Add(aws.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListAssociatedApprovalRuleTemplatesForRepositoryOutput struct { + _ struct{} `type:"structure"` + + // The names of all approval rule templates associated with the repository. + ApprovalRuleTemplateNames []string `locationName:"approvalRuleTemplateNames" type:"list"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListAssociatedApprovalRuleTemplatesForRepositoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAssociatedApprovalRuleTemplatesForRepository = "ListAssociatedApprovalRuleTemplatesForRepository" + +// ListAssociatedApprovalRuleTemplatesForRepositoryRequest returns a request value for making API operation for +// AWS CodeCommit. 
+// +// Lists all approval rule templates that are associated with a specified repository. +// +// // Example sending a request using ListAssociatedApprovalRuleTemplatesForRepositoryRequest. +// req := client.ListAssociatedApprovalRuleTemplatesForRepositoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListAssociatedApprovalRuleTemplatesForRepository +func (c *Client) ListAssociatedApprovalRuleTemplatesForRepositoryRequest(input *ListAssociatedApprovalRuleTemplatesForRepositoryInput) ListAssociatedApprovalRuleTemplatesForRepositoryRequest { + op := &aws.Operation{ + Name: opListAssociatedApprovalRuleTemplatesForRepository, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAssociatedApprovalRuleTemplatesForRepositoryInput{} + } + + req := c.newRequest(op, input, &ListAssociatedApprovalRuleTemplatesForRepositoryOutput{}) + return ListAssociatedApprovalRuleTemplatesForRepositoryRequest{Request: req, Input: input, Copy: c.ListAssociatedApprovalRuleTemplatesForRepositoryRequest} +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryRequest is the request type for the +// ListAssociatedApprovalRuleTemplatesForRepository API operation. +type ListAssociatedApprovalRuleTemplatesForRepositoryRequest struct { + *aws.Request + Input *ListAssociatedApprovalRuleTemplatesForRepositoryInput + Copy func(*ListAssociatedApprovalRuleTemplatesForRepositoryInput) ListAssociatedApprovalRuleTemplatesForRepositoryRequest +} + +// Send marshals and sends the ListAssociatedApprovalRuleTemplatesForRepository API request. +func (r ListAssociatedApprovalRuleTemplatesForRepositoryRequest) Send(ctx context.Context) (*ListAssociatedApprovalRuleTemplatesForRepositoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAssociatedApprovalRuleTemplatesForRepositoryResponse{ + ListAssociatedApprovalRuleTemplatesForRepositoryOutput: r.Request.Data.(*ListAssociatedApprovalRuleTemplatesForRepositoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAssociatedApprovalRuleTemplatesForRepositoryRequestPaginator returns a paginator for ListAssociatedApprovalRuleTemplatesForRepository. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListAssociatedApprovalRuleTemplatesForRepositoryRequest(input) +// p := codecommit.NewListAssociatedApprovalRuleTemplatesForRepositoryRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAssociatedApprovalRuleTemplatesForRepositoryPaginator(req ListAssociatedApprovalRuleTemplatesForRepositoryRequest) ListAssociatedApprovalRuleTemplatesForRepositoryPaginator { + return ListAssociatedApprovalRuleTemplatesForRepositoryPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAssociatedApprovalRuleTemplatesForRepositoryInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAssociatedApprovalRuleTemplatesForRepositoryPaginator struct { + aws.Pager +} + +func (p *ListAssociatedApprovalRuleTemplatesForRepositoryPaginator) CurrentPage() *ListAssociatedApprovalRuleTemplatesForRepositoryOutput { + return p.Pager.CurrentPage().(*ListAssociatedApprovalRuleTemplatesForRepositoryOutput) +} + +// ListAssociatedApprovalRuleTemplatesForRepositoryResponse is the response type for the +// ListAssociatedApprovalRuleTemplatesForRepository API operation. +type ListAssociatedApprovalRuleTemplatesForRepositoryResponse struct { + *ListAssociatedApprovalRuleTemplatesForRepositoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAssociatedApprovalRuleTemplatesForRepository request. +func (r *ListAssociatedApprovalRuleTemplatesForRepositoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListPullRequests.go b/service/codecommit/api_op_ListPullRequests.go index 736985a4b1a..470e3a5cce7 100644 --- a/service/codecommit/api_op_ListPullRequests.go +++ b/service/codecommit/api_op_ListPullRequests.go @@ -17,10 +17,10 @@ type ListPullRequestsInput struct { // user. AuthorArn *string `locationName:"authorArn" type:"string"` - // A non-negative integer used to limit the number of returned results. + // A non-zero, non-negative integer used to limit the number of returned results. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` @@ -59,8 +59,8 @@ func (s *ListPullRequestsInput) Validate() error { type ListPullRequestsOutput struct { _ struct{} `type:"structure"` - // An enumeration token that when provided in a request, returns the next batch - // of the results. + // An enumeration token that allows the operation to batch the next results + // of the operation. NextToken *string `locationName:"nextToken" type:"string"` // The system-generated IDs of the pull requests. 
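A minimal sketch of driving the new ListApprovalRuleTemplates paginator added above, assuming a codecommit.Client has already been constructed from a loaded aws.Config; error handling is abbreviated for illustration.

    req := client.ListApprovalRuleTemplatesRequest(&codecommit.ListApprovalRuleTemplatesInput{})
    p := codecommit.NewListApprovalRuleTemplatesPaginator(req)
    for p.Next(context.TODO()) {
        page := p.CurrentPage()
        for _, name := range page.ApprovalRuleTemplateNames {
            fmt.Println(name)
        }
    }
    if err := p.Err(); err != nil {
        // handle the pagination error
    }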
diff --git a/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go b/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go new file mode 100644 index 00000000000..10a52f6aa72 --- /dev/null +++ b/service/codecommit/api_op_ListRepositoriesForApprovalRuleTemplate.go @@ -0,0 +1,187 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListRepositoriesForApprovalRuleTemplateInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template for which you want to list repositories + // that are associated with that template. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // A non-zero, non-negative integer used to limit the number of returned results. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListRepositoriesForApprovalRuleTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRepositoriesForApprovalRuleTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRepositoriesForApprovalRuleTemplateInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListRepositoriesForApprovalRuleTemplateOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that allows the operation to batch the next results + // of the operation. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of repository names that are associated with the specified approval + // rule template. + RepositoryNames []string `locationName:"repositoryNames" type:"list"` +} + +// String returns the string representation +func (s ListRepositoriesForApprovalRuleTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +const opListRepositoriesForApprovalRuleTemplate = "ListRepositoriesForApprovalRuleTemplate" + +// ListRepositoriesForApprovalRuleTemplateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Lists all repositories associated with the specified approval rule template. +// +// // Example sending a request using ListRepositoriesForApprovalRuleTemplateRequest. 
+// req := client.ListRepositoriesForApprovalRuleTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/ListRepositoriesForApprovalRuleTemplate +func (c *Client) ListRepositoriesForApprovalRuleTemplateRequest(input *ListRepositoriesForApprovalRuleTemplateInput) ListRepositoriesForApprovalRuleTemplateRequest { + op := &aws.Operation{ + Name: opListRepositoriesForApprovalRuleTemplate, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRepositoriesForApprovalRuleTemplateInput{} + } + + req := c.newRequest(op, input, &ListRepositoriesForApprovalRuleTemplateOutput{}) + return ListRepositoriesForApprovalRuleTemplateRequest{Request: req, Input: input, Copy: c.ListRepositoriesForApprovalRuleTemplateRequest} +} + +// ListRepositoriesForApprovalRuleTemplateRequest is the request type for the +// ListRepositoriesForApprovalRuleTemplate API operation. +type ListRepositoriesForApprovalRuleTemplateRequest struct { + *aws.Request + Input *ListRepositoriesForApprovalRuleTemplateInput + Copy func(*ListRepositoriesForApprovalRuleTemplateInput) ListRepositoriesForApprovalRuleTemplateRequest +} + +// Send marshals and sends the ListRepositoriesForApprovalRuleTemplate API request. +func (r ListRepositoriesForApprovalRuleTemplateRequest) Send(ctx context.Context) (*ListRepositoriesForApprovalRuleTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRepositoriesForApprovalRuleTemplateResponse{ + ListRepositoriesForApprovalRuleTemplateOutput: r.Request.Data.(*ListRepositoriesForApprovalRuleTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRepositoriesForApprovalRuleTemplateRequestPaginator returns a paginator for ListRepositoriesForApprovalRuleTemplate. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRepositoriesForApprovalRuleTemplateRequest(input) +// p := codecommit.NewListRepositoriesForApprovalRuleTemplateRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRepositoriesForApprovalRuleTemplatePaginator(req ListRepositoriesForApprovalRuleTemplateRequest) ListRepositoriesForApprovalRuleTemplatePaginator { + return ListRepositoriesForApprovalRuleTemplatePaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRepositoriesForApprovalRuleTemplateInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRepositoriesForApprovalRuleTemplatePaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
+type ListRepositoriesForApprovalRuleTemplatePaginator struct { + aws.Pager +} + +func (p *ListRepositoriesForApprovalRuleTemplatePaginator) CurrentPage() *ListRepositoriesForApprovalRuleTemplateOutput { + return p.Pager.CurrentPage().(*ListRepositoriesForApprovalRuleTemplateOutput) +} + +// ListRepositoriesForApprovalRuleTemplateResponse is the response type for the +// ListRepositoriesForApprovalRuleTemplate API operation. +type ListRepositoriesForApprovalRuleTemplateResponse struct { + *ListRepositoriesForApprovalRuleTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRepositoriesForApprovalRuleTemplate request. +func (r *ListRepositoriesForApprovalRuleTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_ListTagsForResource.go b/service/codecommit/api_op_ListTagsForResource.go index b3d4cac42ba..50aa4dc4ef6 100644 --- a/service/codecommit/api_op_ListTagsForResource.go +++ b/service/codecommit/api_op_ListTagsForResource.go @@ -12,7 +12,7 @@ import ( type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // An enumeration token that when provided in a request, returns the next batch + // An enumeration token that, when provided in a request, returns the next batch // of the results. NextToken *string `locationName:"nextToken" type:"string"` diff --git a/service/codecommit/api_op_MergeBranchesByFastForward.go b/service/codecommit/api_op_MergeBranchesByFastForward.go index b5895e88314..af25ebfd28b 100644 --- a/service/codecommit/api_op_MergeBranchesByFastForward.go +++ b/service/codecommit/api_op_MergeBranchesByFastForward.go @@ -13,7 +13,7 @@ type MergeBranchesByFastForwardInput struct { _ struct{} `type:"structure"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` @@ -24,12 +24,12 @@ type MergeBranchesByFastForwardInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergeBranchesBySquash.go b/service/codecommit/api_op_MergeBranchesBySquash.go index 2e1af769f1d..d0788a42d80 100644 --- a/service/codecommit/api_op_MergeBranchesBySquash.go +++ b/service/codecommit/api_op_MergeBranchesBySquash.go @@ -12,43 +12,42 @@ import ( type MergeBranchesBySquashInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. 
AuthorName *string `locationName:"authorName" type:"string"` // The commit message for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // file is created for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the repository where you want to merge two branches. @@ -57,12 +56,12 @@ type MergeBranchesBySquashInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. 
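// A rough usage sketch for MergeBranchesBySquash, following the request/Send
// pattern shown elsewhere in this package; repository, branch, author, and
// message values are placeholders and a configured client is assumed.
//
//    req := client.MergeBranchesBySquashRequest(&MergeBranchesBySquashInput{
//        RepositoryName:             aws.String("MyDemoRepo"),
//        SourceCommitSpecifier:      aws.String("feature-branch"),
//        DestinationCommitSpecifier: aws.String("master"),
//        TargetBranch:               aws.String("master"),
//        AuthorName:                 aws.String("Mary Major"),
//        CommitMessage:              aws.String("Squash merge of feature-branch"),
//    })
//    resp, err := req.Send(context.TODO())
//    if err == nil {
//        fmt.Println(resp)
//    }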
TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergeBranchesByThreeWay.go b/service/codecommit/api_op_MergeBranchesByThreeWay.go index bf04f61d1e9..ad326fe7edf 100644 --- a/service/codecommit/api_op_MergeBranchesByThreeWay.go +++ b/service/codecommit/api_op_MergeBranchesByThreeWay.go @@ -12,43 +12,42 @@ import ( type MergeBranchesByThreeWayInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // DestinationCommitSpecifier is a required field DestinationCommitSpecifier *string `locationName:"destinationCommitSpecifier" type:"string" required:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. 
KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The name of the repository where you want to merge two branches. @@ -57,12 +56,12 @@ type MergeBranchesByThreeWayInput struct { RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` // The branch, tag, HEAD, or other fully qualified reference used to identify - // a commit. For example, a branch name or a full commit ID. + // a commit (for example, a branch name or a full commit ID). // // SourceCommitSpecifier is a required field SourceCommitSpecifier *string `locationName:"sourceCommitSpecifier" type:"string" required:"true"` - // The branch where the merge will be applied. + // The branch where the merge is applied. TargetBranch *string `locationName:"targetBranch" min:"1" type:"string"` } diff --git a/service/codecommit/api_op_MergePullRequestByFastForward.go b/service/codecommit/api_op_MergePullRequestByFastForward.go index 5fc00e64993..304d46e5198 100644 --- a/service/codecommit/api_op_MergePullRequestByFastForward.go +++ b/service/codecommit/api_op_MergePullRequestByFastForward.go @@ -57,8 +57,7 @@ func (s *MergePullRequestByFastForwardInput) Validate() error { type MergePullRequestByFastForwardOutput struct { _ struct{} `type:"structure"` - // Information about the specified pull request, including information about - // the merge. + // Information about the specified pull request, including the merge. PullRequest *PullRequest `locationName:"pullRequest" type:"structure"` } diff --git a/service/codecommit/api_op_MergePullRequestBySquash.go b/service/codecommit/api_op_MergePullRequestBySquash.go index 8c02188a6a9..0efc6eca89c 100644 --- a/service/codecommit/api_op_MergePullRequestBySquash.go +++ b/service/codecommit/api_op_MergePullRequestBySquash.go @@ -12,37 +12,36 @@ import ( type MergePullRequestBySquashInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. 
The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` - // The email address of the person merging the branches. This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. diff --git a/service/codecommit/api_op_MergePullRequestByThreeWay.go b/service/codecommit/api_op_MergePullRequestByThreeWay.go index e01bf2a22d9..a0e8556843f 100644 --- a/service/codecommit/api_op_MergePullRequestByThreeWay.go +++ b/service/codecommit/api_op_MergePullRequestByThreeWay.go @@ -12,37 +12,36 @@ import ( type MergePullRequestByThreeWayInput struct { _ struct{} `type:"structure"` - // The name of the author who created the commit. This information will be used - // as both the author and committer for the commit. + // The name of the author who created the commit. This information is used as + // both the author and committer for the commit. AuthorName *string `locationName:"authorName" type:"string"` // The commit message to include in the commit information for the merge. CommitMessage *string `locationName:"commitMessage" type:"string"` // The level of conflict detail to use. If unspecified, the default FILE_LEVEL - // is used, which will return a not mergeable result if the same file has differences - // in both branches. If LINE_LEVEL is specified, a conflict will be considered - // not mergeable if the same file in both branches has differences on the same - // line. + // is used, which returns a not-mergeable result if the same file has differences + // in both branches. If LINE_LEVEL is specified, a conflict is considered not + // mergeable if the same file in both branches has differences on the same line. ConflictDetailLevel ConflictDetailLevelTypeEnum `locationName:"conflictDetailLevel" type:"string" enum:"true"` - // A list of inputs to use when resolving conflicts during a merge if AUTOMERGE - // is chosen as the conflict resolution strategy. + // If AUTOMERGE is the conflict resolution strategy, a list of inputs to use + // when resolving conflicts during a merge. ConflictResolution *ConflictResolution `locationName:"conflictResolution" type:"structure"` // Specifies which branch to use when resolving conflicts, or whether to attempt // automatically merging two versions of a file. The default is NONE, which // requires any conflicts to be resolved manually before the merge operation - // will be successful. + // is successful. ConflictResolutionStrategy ConflictResolutionStrategyTypeEnum `locationName:"conflictResolutionStrategy" type:"string" enum:"true"` - // The email address of the person merging the branches. 
This information will - // be used in the commit information for the merge. + // The email address of the person merging the branches. This information is + // used in the commit information for the merge. Email *string `locationName:"email" type:"string"` // If the commit contains deletions, whether to keep a folder or folder structure - // if the changes leave the folders empty. If this is specified as true, a .gitkeep - // file will be created for empty folders. The default is false. + // if the changes leave the folders empty. If true, a .gitkeep file is created + // for empty folders. The default is false. KeepEmptyFolders *bool `locationName:"keepEmptyFolders" type:"boolean"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. diff --git a/service/codecommit/api_op_OverridePullRequestApprovalRules.go b/service/codecommit/api_op_OverridePullRequestApprovalRules.go new file mode 100644 index 00000000000..ab1dd3509de --- /dev/null +++ b/service/codecommit/api_op_OverridePullRequestApprovalRules.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type OverridePullRequestApprovalRulesInput struct { + _ struct{} `type:"structure"` + + // Whether you want to set aside approval rule requirements for the pull request + // (OVERRIDE) or revoke a previous override and apply approval rule requirements + // (REVOKE). REVOKE status is not stored. + // + // OverrideStatus is a required field + OverrideStatus OverrideStatus `locationName:"overrideStatus" type:"string" required:"true" enum:"true"` + + // The system-generated ID of the pull request for which you want to override + // all approval rule requirements. To get this information, use GetPullRequest. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the most recent revision of the pull request. + // You cannot override approval rules for anything but the most recent revision + // of a pull request. To get the revision ID, use GetPullRequest. + // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s OverridePullRequestApprovalRulesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *OverridePullRequestApprovalRulesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "OverridePullRequestApprovalRulesInput"} + if len(s.OverrideStatus) == 0 { + invalidParams.Add(aws.NewErrParamRequired("OverrideStatus")) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type OverridePullRequestApprovalRulesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s OverridePullRequestApprovalRulesOutput) String() string { + return awsutil.Prettify(s) +} + +const opOverridePullRequestApprovalRules = "OverridePullRequestApprovalRules" + +// OverridePullRequestApprovalRulesRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Sets aside (overrides) all approval rule requirements for a specified pull +// request. +// +// // Example sending a request using OverridePullRequestApprovalRulesRequest. +// req := client.OverridePullRequestApprovalRulesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/OverridePullRequestApprovalRules +func (c *Client) OverridePullRequestApprovalRulesRequest(input *OverridePullRequestApprovalRulesInput) OverridePullRequestApprovalRulesRequest { + op := &aws.Operation{ + Name: opOverridePullRequestApprovalRules, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &OverridePullRequestApprovalRulesInput{} + } + + req := c.newRequest(op, input, &OverridePullRequestApprovalRulesOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return OverridePullRequestApprovalRulesRequest{Request: req, Input: input, Copy: c.OverridePullRequestApprovalRulesRequest} +} + +// OverridePullRequestApprovalRulesRequest is the request type for the +// OverridePullRequestApprovalRules API operation. +type OverridePullRequestApprovalRulesRequest struct { + *aws.Request + Input *OverridePullRequestApprovalRulesInput + Copy func(*OverridePullRequestApprovalRulesInput) OverridePullRequestApprovalRulesRequest +} + +// Send marshals and sends the OverridePullRequestApprovalRules API request. +func (r OverridePullRequestApprovalRulesRequest) Send(ctx context.Context) (*OverridePullRequestApprovalRulesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &OverridePullRequestApprovalRulesResponse{ + OverridePullRequestApprovalRulesOutput: r.Request.Data.(*OverridePullRequestApprovalRulesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// OverridePullRequestApprovalRulesResponse is the response type for the +// OverridePullRequestApprovalRules API operation. +type OverridePullRequestApprovalRulesResponse struct { + *OverridePullRequestApprovalRulesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// OverridePullRequestApprovalRules request. 
+func (r *OverridePullRequestApprovalRulesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_PostCommentForComparedCommit.go b/service/codecommit/api_op_PostCommentForComparedCommit.go index 25f495f98a9..909ffc0377e 100644 --- a/service/codecommit/api_op_PostCommentForComparedCommit.go +++ b/service/codecommit/api_op_PostCommentForComparedCommit.go @@ -13,22 +13,20 @@ type PostCommentForComparedCommitInput struct { _ struct{} `type:"structure"` // To establish the directionality of the comparison, the full commit ID of - // the 'after' commit. + // the after commit. // // AfterCommitId is a required field AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` // To establish the directionality of the comparison, the full commit ID of - // the 'before' commit. - // - // This is required for commenting on any commit unless that commit is the initial - // commit. + // the before commit. Required for commenting on any commit unless that commit + // is the initial commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The content of the comment you want to make. @@ -79,18 +77,16 @@ func (s *PostCommentForComparedCommitInput) Validate() error { type PostCommentForComparedCommitOutput struct { _ struct{} `type:"structure"` - // In the directionality you established, the blob ID of the 'after' blob. + // In the directionality you established, the blob ID of the after blob. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // In the directionality you established, the full commit ID of the 'after' - // commit. + // In the directionality you established, the full commit ID of the after commit. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // In the directionality you established, the blob ID of the 'before' blob. + // In the directionality you established, the blob ID of the before blob. BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - // In the directionality you established, the full commit ID of the 'before' - // commit. + // In the directionality you established, the full commit ID of the before commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // The content of the comment you posted. 
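The OverridePullRequestApprovalRules operation added above follows the SDK's usual request/Send pattern. The sketch below (not part of the upstream diff) shows one way it might be called from application code; the pull request ID, the revision ID, and the string passed for OverrideStatus are placeholders, and the generated enum constant names for OverrideStatus are assumed rather than confirmed here.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	// Load the default AWS configuration (credentials, region).
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}

	client := codecommit.New(cfg)

	// Build the request. PullRequestId and RevisionId are hypothetical
	// placeholders; the revision ID would normally come from GetPullRequest.
	req := client.OverridePullRequestApprovalRulesRequest(&codecommit.OverridePullRequestApprovalRulesInput{
		PullRequestId:  aws.String("42"),
		RevisionId:     aws.String("EXAMPLE-REVISION-ID"),
		OverrideStatus: codecommit.OverrideStatus("OVERRIDE"), // a generated constant may also exist for this value
	})

	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatalf("OverridePullRequestApprovalRules failed: %v", err)
	}
	fmt.Println(resp)
}

Passing "REVOKE" instead would remove a previous override; as the input documentation above notes, the REVOKE state itself is not stored.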
diff --git a/service/codecommit/api_op_PostCommentForPullRequest.go b/service/codecommit/api_op_PostCommentForPullRequest.go index 5e88d1b28c4..c1f1734cbc5 100644 --- a/service/codecommit/api_op_PostCommentForPullRequest.go +++ b/service/codecommit/api_op_PostCommentForPullRequest.go @@ -24,10 +24,10 @@ type PostCommentForPullRequestInput struct { // BeforeCommitId is a required field BeforeCommitId *string `locationName:"beforeCommitId" type:"string" required:"true"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The content of your comment on the change. @@ -36,8 +36,8 @@ type PostCommentForPullRequestInput struct { Content *string `locationName:"content" type:"string" required:"true"` // The location of the change where you want to post your comment. If no location - // is provided, the comment will be posted as a general comment on the pull - // request difference between the before commit ID and the after commit ID. + // is provided, the comment is posted as a general comment on the pull request + // difference between the before commit ID and the after commit ID. Location *Location `locationName:"location" type:"structure"` // The system-generated ID of the pull request. To get this ID, use ListPullRequests. @@ -92,14 +92,14 @@ func (s *PostCommentForPullRequestInput) Validate() error { type PostCommentForPullRequestOutput struct { _ struct{} `type:"structure"` - // In the directionality of the pull request, the blob ID of the 'after' blob. + // In the directionality of the pull request, the blob ID of the after blob. AfterBlobId *string `locationName:"afterBlobId" type:"string"` // The full commit ID of the commit in the destination branch where the pull - // request will be merged. + // request is merged. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // In the directionality of the pull request, the blob ID of the 'before' blob. + // In the directionality of the pull request, the blob ID of the before blob. BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` // The full commit ID of the commit in the source branch used to create the diff --git a/service/codecommit/api_op_PostCommentReply.go b/service/codecommit/api_op_PostCommentReply.go index dc516483805..cc2a16c644b 100644 --- a/service/codecommit/api_op_PostCommentReply.go +++ b/service/codecommit/api_op_PostCommentReply.go @@ -12,10 +12,10 @@ import ( type PostCommentReplyInput struct { _ struct{} `type:"structure"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. 
ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` // The contents of your reply to a comment. diff --git a/service/codecommit/api_op_PutFile.go b/service/codecommit/api_op_PutFile.go index cfff5a4a103..499510ba37f 100644 --- a/service/codecommit/api_op_PutFile.go +++ b/service/codecommit/api_op_PutFile.go @@ -13,14 +13,13 @@ type PutFileInput struct { _ struct{} `type:"structure"` // The name of the branch where you want to add or update the file. If this - // is an empty repository, this branch will be created. + // is an empty repository, this branch is created. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // A message about why this file was added or updated. While optional, adding - // a message is strongly encouraged in order to provide a more useful commit - // history for your repository. + // A message about why this file was added or updated. Although it is optional, + // a message makes the commit history for your repository more useful. CommitMessage *string `locationName:"commitMessage" type:"string"` // An email address for the person adding or updating the file. @@ -34,29 +33,28 @@ type PutFileInput struct { FileContent []byte `locationName:"fileContent" type:"blob" required:"true"` // The file mode permissions of the blob. Valid file mode permissions are listed - // below. + // here. FileMode FileModeTypeEnum `locationName:"fileMode" type:"string" enum:"true"` // The name of the file you want to add or update, including the relative path // to the file in the repository. // - // If the path does not currently exist in the repository, the path will be - // created as part of adding the file. + // If the path does not currently exist in the repository, the path is created + // as part of adding the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` - // The name of the person adding or updating the file. While optional, adding - // a name is strongly encouraged in order to provide a more useful commit history - // for your repository. + // The name of the person adding or updating the file. Although it is optional, + // a name makes the commit history for your repository more useful. Name *string `locationName:"name" type:"string"` // The full commit ID of the head commit in the branch where you want to add // or update the file. If this is an empty repository, no commit ID is required. // If this is not an empty repository, a commit ID is required. // - // The commit ID must match the ID of the head commit at the time of the operation, - // or an error will occur, and the file will not be added or updated. + // The commit ID must match the ID of the head commit at the time of the operation. + // Otherwise, an error occurs, and the file is not added or updated. ParentCommitId *string `locationName:"parentCommitId" type:"string"` // The name of the repository where you want to add or update the file. @@ -110,7 +108,7 @@ type PutFileOutput struct { // BlobId is a required field BlobId *string `locationName:"blobId" type:"string" required:"true"` - // The full SHA of the commit that contains this file change. + // The full SHA ID of the commit that contains this file change. 
// // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` diff --git a/service/codecommit/api_op_PutRepositoryTriggers.go b/service/codecommit/api_op_PutRepositoryTriggers.go index a00aecc067b..87c11af35b7 100644 --- a/service/codecommit/api_op_PutRepositoryTriggers.go +++ b/service/codecommit/api_op_PutRepositoryTriggers.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Represents the input ofa put repository triggers operation. +// Represents the input of a put repository triggers operation. type PutRepositoryTriggersInput struct { _ struct{} `type:"structure"` @@ -76,8 +76,7 @@ const opPutRepositoryTriggers = "PutRepositoryTriggers" // PutRepositoryTriggersRequest returns a request value for making API operation for // AWS CodeCommit. // -// Replaces all triggers for a repository. This can be used to create or delete -// triggers. +// Replaces all triggers for a repository. Used to create or delete triggers. // // // Example sending a request using PutRepositoryTriggersRequest. // req := client.PutRepositoryTriggersRequest(params) diff --git a/service/codecommit/api_op_TestRepositoryTriggers.go b/service/codecommit/api_op_TestRepositoryTriggers.go index 7f25c565e36..832c7698650 100644 --- a/service/codecommit/api_op_TestRepositoryTriggers.go +++ b/service/codecommit/api_op_TestRepositoryTriggers.go @@ -62,8 +62,8 @@ func (s *TestRepositoryTriggersInput) Validate() error { type TestRepositoryTriggersOutput struct { _ struct{} `type:"structure"` - // The list of triggers that were not able to be tested. This list provides - // the names of the triggers that could not be tested, separated by commas. + // The list of triggers that were not tested. This list provides the names of + // the triggers that could not be tested, separated by commas. FailedExecutions []RepositoryTriggerExecutionFailure `locationName:"failedExecutions" type:"list"` // The list of triggers that were successfully tested. This list provides the @@ -83,8 +83,8 @@ const opTestRepositoryTriggers = "TestRepositoryTriggers" // // Tests the functionality of repository triggers by sending information to // the trigger target. If real data is available in the repository, the test -// will send data from the last commit. If no data is available, sample data -// will be generated. +// sends data from the last commit. If no data is available, sample data is +// generated. // // // Example sending a request using TestRepositoryTriggersRequest. // req := client.TestRepositoryTriggersRequest(params) diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go new file mode 100644 index 00000000000..bad917f75e8 --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateContent.go @@ -0,0 +1,143 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateContentInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule template where you want to update the content + // of the rule. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The SHA-256 hash signature for the content of the approval rule. 
You can + // retrieve this information by using GetPullRequest. + ExistingRuleContentSha256 *string `locationName:"existingRuleContentSha256" type:"string"` + + // The content that replaces the existing content of the rule. Content statements + // must be complete. You cannot provide only the changes. + // + // NewRuleContent is a required field + NewRuleContent *string `locationName:"newRuleContent" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateContentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateApprovalRuleTemplateContentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateContentInput"} + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if s.NewRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("NewRuleContent")) + } + if s.NewRuleContent != nil && len(*s.NewRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewRuleContent", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateContentOutput struct { + _ struct{} `type:"structure"` + + // Returns information about an approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateContentOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateContent = "UpdateApprovalRuleTemplateContent" + +// UpdateApprovalRuleTemplateContentRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the content of an approval rule template. You can change the number +// of required approvals, the membership of the approval rule, and whether an +// approval pool is defined. +// +// // Example sending a request using UpdateApprovalRuleTemplateContentRequest. +// req := client.UpdateApprovalRuleTemplateContentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateContent +func (c *Client) UpdateApprovalRuleTemplateContentRequest(input *UpdateApprovalRuleTemplateContentInput) UpdateApprovalRuleTemplateContentRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateContent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateContentInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateContentOutput{}) + return UpdateApprovalRuleTemplateContentRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateContentRequest} +} + +// UpdateApprovalRuleTemplateContentRequest is the request type for the +// UpdateApprovalRuleTemplateContent API operation. 
+type UpdateApprovalRuleTemplateContentRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateContentInput + Copy func(*UpdateApprovalRuleTemplateContentInput) UpdateApprovalRuleTemplateContentRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateContent API request. +func (r UpdateApprovalRuleTemplateContentRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateContentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateContentResponse{ + UpdateApprovalRuleTemplateContentOutput: r.Request.Data.(*UpdateApprovalRuleTemplateContentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateContentResponse is the response type for the +// UpdateApprovalRuleTemplateContent API operation. +type UpdateApprovalRuleTemplateContentResponse struct { + *UpdateApprovalRuleTemplateContentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateContent request. +func (r *UpdateApprovalRuleTemplateContentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go new file mode 100644 index 00000000000..dc1e4be8cec --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateDescription.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateDescriptionInput struct { + _ struct{} `type:"structure"` + + // The updated description of the approval rule template. + // + // ApprovalRuleTemplateDescription is a required field + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string" required:"true"` + + // The name of the template for which you want to update the description. + // + // ApprovalRuleTemplateName is a required field + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateApprovalRuleTemplateDescriptionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateDescriptionInput"} + + if s.ApprovalRuleTemplateDescription == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateDescription")) + } + + if s.ApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleTemplateName")) + } + if s.ApprovalRuleTemplateName != nil && len(*s.ApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateDescriptionOutput struct { + _ struct{} `type:"structure"` + + // The structure and content of the updated approval rule template. 
+ // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateDescription = "UpdateApprovalRuleTemplateDescription" + +// UpdateApprovalRuleTemplateDescriptionRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the description for a specified approval rule template. +// +// // Example sending a request using UpdateApprovalRuleTemplateDescriptionRequest. +// req := client.UpdateApprovalRuleTemplateDescriptionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateDescription +func (c *Client) UpdateApprovalRuleTemplateDescriptionRequest(input *UpdateApprovalRuleTemplateDescriptionInput) UpdateApprovalRuleTemplateDescriptionRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateDescriptionInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateDescriptionOutput{}) + return UpdateApprovalRuleTemplateDescriptionRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateDescriptionRequest} +} + +// UpdateApprovalRuleTemplateDescriptionRequest is the request type for the +// UpdateApprovalRuleTemplateDescription API operation. +type UpdateApprovalRuleTemplateDescriptionRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateDescriptionInput + Copy func(*UpdateApprovalRuleTemplateDescriptionInput) UpdateApprovalRuleTemplateDescriptionRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateDescription API request. +func (r UpdateApprovalRuleTemplateDescriptionRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateDescriptionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateDescriptionResponse{ + UpdateApprovalRuleTemplateDescriptionOutput: r.Request.Data.(*UpdateApprovalRuleTemplateDescriptionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateDescriptionResponse is the response type for the +// UpdateApprovalRuleTemplateDescription API operation. +type UpdateApprovalRuleTemplateDescriptionResponse struct { + *UpdateApprovalRuleTemplateDescriptionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateDescription request. +func (r *UpdateApprovalRuleTemplateDescriptionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go b/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go new file mode 100644 index 00000000000..618c2c24703 --- /dev/null +++ b/service/codecommit/api_op_UpdateApprovalRuleTemplateName.go @@ -0,0 +1,135 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdateApprovalRuleTemplateNameInput struct { + _ struct{} `type:"structure"` + + // The new name you want to apply to the approval rule template. + // + // NewApprovalRuleTemplateName is a required field + NewApprovalRuleTemplateName *string `locationName:"newApprovalRuleTemplateName" min:"1" type:"string" required:"true"` + + // The current name of the approval rule template. + // + // OldApprovalRuleTemplateName is a required field + OldApprovalRuleTemplateName *string `locationName:"oldApprovalRuleTemplateName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateNameInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateApprovalRuleTemplateNameInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateApprovalRuleTemplateNameInput"} + + if s.NewApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("NewApprovalRuleTemplateName")) + } + if s.NewApprovalRuleTemplateName != nil && len(*s.NewApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewApprovalRuleTemplateName", 1)) + } + + if s.OldApprovalRuleTemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("OldApprovalRuleTemplateName")) + } + if s.OldApprovalRuleTemplateName != nil && len(*s.OldApprovalRuleTemplateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OldApprovalRuleTemplateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdateApprovalRuleTemplateNameOutput struct { + _ struct{} `type:"structure"` + + // The structure and content of the updated approval rule template. + // + // ApprovalRuleTemplate is a required field + ApprovalRuleTemplate *ApprovalRuleTemplate `locationName:"approvalRuleTemplate" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateApprovalRuleTemplateNameOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateApprovalRuleTemplateName = "UpdateApprovalRuleTemplateName" + +// UpdateApprovalRuleTemplateNameRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the name of a specified approval rule template. +// +// // Example sending a request using UpdateApprovalRuleTemplateNameRequest. +// req := client.UpdateApprovalRuleTemplateNameRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdateApprovalRuleTemplateName +func (c *Client) UpdateApprovalRuleTemplateNameRequest(input *UpdateApprovalRuleTemplateNameInput) UpdateApprovalRuleTemplateNameRequest { + op := &aws.Operation{ + Name: opUpdateApprovalRuleTemplateName, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateApprovalRuleTemplateNameInput{} + } + + req := c.newRequest(op, input, &UpdateApprovalRuleTemplateNameOutput{}) + return UpdateApprovalRuleTemplateNameRequest{Request: req, Input: input, Copy: c.UpdateApprovalRuleTemplateNameRequest} +} + +// UpdateApprovalRuleTemplateNameRequest is the request type for the +// UpdateApprovalRuleTemplateName API operation. 
+type UpdateApprovalRuleTemplateNameRequest struct { + *aws.Request + Input *UpdateApprovalRuleTemplateNameInput + Copy func(*UpdateApprovalRuleTemplateNameInput) UpdateApprovalRuleTemplateNameRequest +} + +// Send marshals and sends the UpdateApprovalRuleTemplateName API request. +func (r UpdateApprovalRuleTemplateNameRequest) Send(ctx context.Context) (*UpdateApprovalRuleTemplateNameResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateApprovalRuleTemplateNameResponse{ + UpdateApprovalRuleTemplateNameOutput: r.Request.Data.(*UpdateApprovalRuleTemplateNameOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateApprovalRuleTemplateNameResponse is the response type for the +// UpdateApprovalRuleTemplateName API operation. +type UpdateApprovalRuleTemplateNameResponse struct { + *UpdateApprovalRuleTemplateNameOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateApprovalRuleTemplateName request. +func (r *UpdateApprovalRuleTemplateNameResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdateComment.go b/service/codecommit/api_op_UpdateComment.go index d6aba2b030d..24782e5a506 100644 --- a/service/codecommit/api_op_UpdateComment.go +++ b/service/codecommit/api_op_UpdateComment.go @@ -18,8 +18,7 @@ type UpdateCommentInput struct { // CommentId is a required field CommentId *string `locationName:"commentId" type:"string" required:"true"` - // The updated content with which you want to replace the existing content of - // the comment. + // The updated content to replace the existing content of the comment. // // Content is a required field Content *string `locationName:"content" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go new file mode 100644 index 00000000000..3f62779d784 --- /dev/null +++ b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go @@ -0,0 +1,172 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type UpdatePullRequestApprovalRuleContentInput struct { + _ struct{} `type:"structure"` + + // The name of the approval rule you want to update. + // + // ApprovalRuleName is a required field + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string" required:"true"` + + // The SHA-256 hash signature for the content of the approval rule. You can + // retrieve this information by using GetPullRequest. + ExistingRuleContentSha256 *string `locationName:"existingRuleContentSha256" type:"string"` + + // The updated content for the approval rule. + // + // When you update the content of the approval rule, you can specify approvers + // in an approval pool in one of two ways: + // + // * CodeCommitApprovers: This option only requires an AWS account and a + // resource. It can be used for both IAM users and federated access users + // whose name matches the provided resource name. This is a very powerful + // option that offers a great deal of flexibility. 
For example, if you specify + // the AWS account 123456789012 and Mary_Major, all of the following are + // counted as approvals coming from that user: An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) A federated user identified + // in IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) + // This option does not recognize an active session of someone assuming the + // role of CodeCommitReview with a role session name of Mary_Major (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) + // unless you include a wildcard (*Mary_Major). + // + // * Fully qualified ARN: This option allows you to specify the fully qualified + // Amazon Resource Name (ARN) of the IAM user or role. + // + // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers + // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // NewRuleContent is a required field + NewRuleContent *string `locationName:"newRuleContent" min:"1" type:"string" required:"true"` + + // The system-generated ID of the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalRuleContentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePullRequestApprovalRuleContentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdatePullRequestApprovalRuleContentInput"} + + if s.ApprovalRuleName == nil { + invalidParams.Add(aws.NewErrParamRequired("ApprovalRuleName")) + } + if s.ApprovalRuleName != nil && len(*s.ApprovalRuleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ApprovalRuleName", 1)) + } + + if s.NewRuleContent == nil { + invalidParams.Add(aws.NewErrParamRequired("NewRuleContent")) + } + if s.NewRuleContent != nil && len(*s.NewRuleContent) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewRuleContent", 1)) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdatePullRequestApprovalRuleContentOutput struct { + _ struct{} `type:"structure"` + + // Information about the updated approval rule. + // + // ApprovalRule is a required field + ApprovalRule *ApprovalRule `locationName:"approvalRule" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalRuleContentOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdatePullRequestApprovalRuleContent = "UpdatePullRequestApprovalRuleContent" + +// UpdatePullRequestApprovalRuleContentRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the structure of an approval rule created specifically for a pull +// request. For example, you can change the number of required approvers and +// the approval pool for approvers. +// +// // Example sending a request using UpdatePullRequestApprovalRuleContentRequest. 
+// req := client.UpdatePullRequestApprovalRuleContentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestApprovalRuleContent +func (c *Client) UpdatePullRequestApprovalRuleContentRequest(input *UpdatePullRequestApprovalRuleContentInput) UpdatePullRequestApprovalRuleContentRequest { + op := &aws.Operation{ + Name: opUpdatePullRequestApprovalRuleContent, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePullRequestApprovalRuleContentInput{} + } + + req := c.newRequest(op, input, &UpdatePullRequestApprovalRuleContentOutput{}) + return UpdatePullRequestApprovalRuleContentRequest{Request: req, Input: input, Copy: c.UpdatePullRequestApprovalRuleContentRequest} +} + +// UpdatePullRequestApprovalRuleContentRequest is the request type for the +// UpdatePullRequestApprovalRuleContent API operation. +type UpdatePullRequestApprovalRuleContentRequest struct { + *aws.Request + Input *UpdatePullRequestApprovalRuleContentInput + Copy func(*UpdatePullRequestApprovalRuleContentInput) UpdatePullRequestApprovalRuleContentRequest +} + +// Send marshals and sends the UpdatePullRequestApprovalRuleContent API request. +func (r UpdatePullRequestApprovalRuleContentRequest) Send(ctx context.Context) (*UpdatePullRequestApprovalRuleContentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdatePullRequestApprovalRuleContentResponse{ + UpdatePullRequestApprovalRuleContentOutput: r.Request.Data.(*UpdatePullRequestApprovalRuleContentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdatePullRequestApprovalRuleContentResponse is the response type for the +// UpdatePullRequestApprovalRuleContent API operation. +type UpdatePullRequestApprovalRuleContentResponse struct { + *UpdatePullRequestApprovalRuleContentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdatePullRequestApprovalRuleContent request. +func (r *UpdatePullRequestApprovalRuleContentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdatePullRequestApprovalState.go b/service/codecommit/api_op_UpdatePullRequestApprovalState.go new file mode 100644 index 00000000000..7051fcd86bc --- /dev/null +++ b/service/codecommit/api_op_UpdatePullRequestApprovalState.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package codecommit + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type UpdatePullRequestApprovalStateInput struct { + _ struct{} `type:"structure"` + + // The approval state to associate with the user on the pull request. + // + // ApprovalState is a required field + ApprovalState ApprovalState `locationName:"approvalState" type:"string" required:"true" enum:"true"` + + // The system-generated ID of the pull request. + // + // PullRequestId is a required field + PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` + + // The system-generated ID of the revision. 
+ // + // RevisionId is a required field + RevisionId *string `locationName:"revisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalStateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdatePullRequestApprovalStateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdatePullRequestApprovalStateInput"} + if len(s.ApprovalState) == 0 { + invalidParams.Add(aws.NewErrParamRequired("ApprovalState")) + } + + if s.PullRequestId == nil { + invalidParams.Add(aws.NewErrParamRequired("PullRequestId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UpdatePullRequestApprovalStateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdatePullRequestApprovalStateOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdatePullRequestApprovalState = "UpdatePullRequestApprovalState" + +// UpdatePullRequestApprovalStateRequest returns a request value for making API operation for +// AWS CodeCommit. +// +// Updates the state of a user's approval on a pull request. The user is derived +// from the signed-in account when the request is made. +// +// // Example sending a request using UpdatePullRequestApprovalStateRequest. +// req := client.UpdatePullRequestApprovalStateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/UpdatePullRequestApprovalState +func (c *Client) UpdatePullRequestApprovalStateRequest(input *UpdatePullRequestApprovalStateInput) UpdatePullRequestApprovalStateRequest { + op := &aws.Operation{ + Name: opUpdatePullRequestApprovalState, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePullRequestApprovalStateInput{} + } + + req := c.newRequest(op, input, &UpdatePullRequestApprovalStateOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdatePullRequestApprovalStateRequest{Request: req, Input: input, Copy: c.UpdatePullRequestApprovalStateRequest} +} + +// UpdatePullRequestApprovalStateRequest is the request type for the +// UpdatePullRequestApprovalState API operation. +type UpdatePullRequestApprovalStateRequest struct { + *aws.Request + Input *UpdatePullRequestApprovalStateInput + Copy func(*UpdatePullRequestApprovalStateInput) UpdatePullRequestApprovalStateRequest +} + +// Send marshals and sends the UpdatePullRequestApprovalState API request. +func (r UpdatePullRequestApprovalStateRequest) Send(ctx context.Context) (*UpdatePullRequestApprovalStateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdatePullRequestApprovalStateResponse{ + UpdatePullRequestApprovalStateOutput: r.Request.Data.(*UpdatePullRequestApprovalStateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdatePullRequestApprovalStateResponse is the response type for the +// UpdatePullRequestApprovalState API operation. 
+type UpdatePullRequestApprovalStateResponse struct { + *UpdatePullRequestApprovalStateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdatePullRequestApprovalState request. +func (r *UpdatePullRequestApprovalStateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/codecommit/api_op_UpdatePullRequestDescription.go b/service/codecommit/api_op_UpdatePullRequestDescription.go index eee85ee6914..c9632af6031 100644 --- a/service/codecommit/api_op_UpdatePullRequestDescription.go +++ b/service/codecommit/api_op_UpdatePullRequestDescription.go @@ -13,7 +13,7 @@ type UpdatePullRequestDescriptionInput struct { _ struct{} `type:"structure"` // The updated content of the description for the pull request. This content - // will replace the existing description. + // replaces the existing description. // // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestStatus.go b/service/codecommit/api_op_UpdatePullRequestStatus.go index 280171b3de6..0b510b0b441 100644 --- a/service/codecommit/api_op_UpdatePullRequestStatus.go +++ b/service/codecommit/api_op_UpdatePullRequestStatus.go @@ -18,7 +18,7 @@ type UpdatePullRequestStatusInput struct { PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` // The status of the pull request. The only valid operations are to update the - // status from OPEN to OPEN, OPEN to CLOSED or from from CLOSED to CLOSED. + // status from OPEN to OPEN, OPEN to CLOSED or from CLOSED to CLOSED. // // PullRequestStatus is a required field PullRequestStatus PullRequestStatusEnum `locationName:"pullRequestStatus" type:"string" required:"true" enum:"true"` diff --git a/service/codecommit/api_op_UpdatePullRequestTitle.go b/service/codecommit/api_op_UpdatePullRequestTitle.go index d3e31f2cd81..c1f705346ec 100644 --- a/service/codecommit/api_op_UpdatePullRequestTitle.go +++ b/service/codecommit/api_op_UpdatePullRequestTitle.go @@ -17,7 +17,7 @@ type UpdatePullRequestTitleInput struct { // PullRequestId is a required field PullRequestId *string `locationName:"pullRequestId" type:"string" required:"true"` - // The updated title of the pull request. This will replace the existing title. + // The updated title of the pull request. This replaces the existing title. // // Title is a required field Title *string `locationName:"title" type:"string" required:"true"` diff --git a/service/codecommit/api_op_UpdateRepositoryDescription.go b/service/codecommit/api_op_UpdateRepositoryDescription.go index 05180c2e026..f3855b39a45 100644 --- a/service/codecommit/api_op_UpdateRepositoryDescription.go +++ b/service/codecommit/api_op_UpdateRepositoryDescription.go @@ -65,9 +65,9 @@ const opUpdateRepositoryDescription = "UpdateRepositoryDescription" // // The description field for a repository accepts all HTML characters and all // valid Unicode characters. Applications that do not HTML-encode the description -// and display it in a web page could expose users to potentially malicious -// code. Make sure that you HTML-encode the description field in any application -// that uses this API to display the repository description on a web page. +// and display it in a webpage can expose users to potentially malicious code. +// Make sure that you HTML-encode the description field in any application that +// uses this API to display the repository description on a webpage. 
// // // Example sending a request using UpdateRepositoryDescriptionRequest. // req := client.UpdateRepositoryDescriptionRequest(params) diff --git a/service/codecommit/api_op_UpdateRepositoryName.go b/service/codecommit/api_op_UpdateRepositoryName.go index 5218f0f8298..cbcc5f598dc 100644 --- a/service/codecommit/api_op_UpdateRepositoryName.go +++ b/service/codecommit/api_op_UpdateRepositoryName.go @@ -20,7 +20,7 @@ type UpdateRepositoryNameInput struct { // NewName is a required field NewName *string `locationName:"newName" min:"1" type:"string" required:"true"` - // The existing name of the repository. + // The current name of the repository. // // OldName is a required field OldName *string `locationName:"oldName" min:"1" type:"string" required:"true"` @@ -70,10 +70,10 @@ const opUpdateRepositoryName = "UpdateRepositoryName" // AWS CodeCommit. // // Renames a repository. The repository name must be unique across the calling -// AWS account. In addition, repository names are limited to 100 alphanumeric, -// dash, and underscore characters, and cannot include certain characters. The -// suffix ".git" is prohibited. For a full description of the limits on repository -// names, see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) +// AWS account. Repository names are limited to 100 alphanumeric, dash, and +// underscore characters, and cannot include certain characters. The suffix +// .git is prohibited. For more information about the limits on repository names, +// see Limits (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html) // in the AWS CodeCommit User Guide. // // // Example sending a request using UpdateRepositoryNameRequest. diff --git a/service/codecommit/api_types.go b/service/codecommit/api_types.go index 33b19ea96f0..0852409858f 100644 --- a/service/codecommit/api_types.go +++ b/service/codecommit/api_types.go @@ -13,7 +13,168 @@ import ( var _ aws.Config var _ = awsutil.Prettify -// Information about errors in a BatchDescribeMergeConflicts operation. +// Returns information about a specific approval on a pull request. +type Approval struct { + _ struct{} `type:"structure"` + + // The state of the approval, APPROVE or REVOKE. REVOKE states are not stored. + ApprovalState ApprovalState `locationName:"approvalState" type:"string" enum:"true"` + + // The Amazon Resource Name (ARN) of the user. + UserArn *string `locationName:"userArn" type:"string"` +} + +// String returns the string representation +func (s Approval) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an approval rule. +type ApprovalRule struct { + _ struct{} `type:"structure"` + + // The content of the approval rule. + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string"` + + // The system-generated ID of the approval rule. + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string"` + + // The name of the approval rule. + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string"` + + // The date the approval rule was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date the approval rule was most recently changed, in timestamp format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the user who made the most recent changes + // to the approval rule. 
+ LastModifiedUser *string `locationName:"lastModifiedUser" type:"string"` + + // The approval rule template used to create the rule. + OriginApprovalRuleTemplate *OriginApprovalRuleTemplate `locationName:"originApprovalRuleTemplate" type:"structure"` + + // The SHA-256 hash signature for the content of the approval rule. + RuleContentSha256 *string `locationName:"ruleContentSha256" type:"string"` +} + +// String returns the string representation +func (s ApprovalRule) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an event for an approval rule. +type ApprovalRuleEventMetadata struct { + _ struct{} `type:"structure"` + + // The content of the approval rule. + ApprovalRuleContent *string `locationName:"approvalRuleContent" min:"1" type:"string"` + + // The system-generated ID of the approval rule. + ApprovalRuleId *string `locationName:"approvalRuleId" type:"string"` + + // The name of the approval rule. + ApprovalRuleName *string `locationName:"approvalRuleName" min:"1" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an override event for approval rules for a pull +// request. +type ApprovalRuleOverriddenEventMetadata struct { + _ struct{} `type:"structure"` + + // The status of the override event. + OverrideStatus OverrideStatus `locationName:"overrideStatus" type:"string" enum:"true"` + + // The revision ID of the pull request when the override event occurred. + RevisionId *string `locationName:"revisionId" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleOverriddenEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about an approval rule template. +type ApprovalRuleTemplate struct { + _ struct{} `type:"structure"` + + // The content of the approval rule template. + ApprovalRuleTemplateContent *string `locationName:"approvalRuleTemplateContent" min:"1" type:"string"` + + // The description of the approval rule template. + ApprovalRuleTemplateDescription *string `locationName:"approvalRuleTemplateDescription" type:"string"` + + // The system-generated ID of the approval rule template. + ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string"` + + // The name of the approval rule template. + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string"` + + // The date the approval rule template was created, in timestamp format. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date the approval rule template was most recently changed, in timestamp + // format. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the user who made the most recent changes + // to the approval rule template. + LastModifiedUser *string `locationName:"lastModifiedUser" type:"string"` + + // The SHA-256 hash signature for the content of the approval rule template. + RuleContentSha256 *string `locationName:"ruleContentSha256" type:"string"` +} + +// String returns the string representation +func (s ApprovalRuleTemplate) String() string { + return awsutil.Prettify(s) +} + +// Returns information about a change in the approval state for a pull request. +type ApprovalStateChangedEventMetadata struct { + _ struct{} `type:"structure"` + + // The approval status for the pull request. 
+ ApprovalStatus ApprovalState `locationName:"approvalStatus" type:"string" enum:"true"` + + // The revision ID of the pull request when the approval state changed. + RevisionId *string `locationName:"revisionId" type:"string"` +} + +// String returns the string representation +func (s ApprovalStateChangedEventMetadata) String() string { + return awsutil.Prettify(s) +} + +// Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories +// operation. +type BatchAssociateApprovalRuleTemplateWithRepositoriesError struct { + _ struct{} `type:"structure"` + + // An error code that specifies whether the repository name was not valid or + // not found. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides details about why the repository name was + // not found or not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The name of the repository where the association was not made. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchAssociateApprovalRuleTemplateWithRepositoriesError) String() string { + return awsutil.Prettify(s) +} + +// Returns information about errors in a BatchDescribeMergeConflicts operation. type BatchDescribeMergeConflictsError struct { _ struct{} `type:"structure"` @@ -38,6 +199,29 @@ func (s BatchDescribeMergeConflictsError) String() string { return awsutil.Prettify(s) } +// Returns information about errors in a BatchDisassociateApprovalRuleTemplateFromRepositories +// operation. +type BatchDisassociateApprovalRuleTemplateFromRepositoriesError struct { + _ struct{} `type:"structure"` + + // An error code that specifies whether the repository name was not valid or + // not found. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides details about why the repository name was + // either not found or not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + // The name of the repository where the association with the template was not + // able to be removed. + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string"` +} + +// String returns the string representation +func (s BatchDisassociateApprovalRuleTemplateFromRepositoriesError) String() string { + return awsutil.Prettify(s) +} + // Returns information about errors in a BatchGetCommits operation. type BatchGetCommitsError struct { _ struct{} `type:"structure"` @@ -76,7 +260,7 @@ type BlobMetadata struct { // * 120000 indicates a symlink Mode *string `locationName:"mode" type:"string"` - // The path to the blob and any associated file name, if any. + // The path to the blob and associated file name, if any. Path *string `locationName:"path" type:"string"` } @@ -108,10 +292,10 @@ type Comment struct { // The Amazon Resource Name (ARN) of the person who posted the comment. AuthorArn *string `locationName:"authorArn" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. 
ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` // The system-generated comment ID. @@ -142,16 +326,16 @@ func (s Comment) String() string { type CommentsForComparedCommit struct { _ struct{} `type:"structure"` - // The full blob ID of the commit used to establish the 'after' of the comparison. + // The full blob ID of the commit used to establish the after of the comparison. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // The full commit ID of the commit used to establish the 'after' of the comparison. + // The full commit ID of the commit used to establish the after of the comparison. AfterCommitId *string `locationName:"afterCommitId" type:"string"` - // The full blob ID of the commit used to establish the 'before' of the comparison. + // The full blob ID of the commit used to establish the before of the comparison. BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` - // The full commit ID of the commit used to establish the 'before' of the comparison. + // The full commit ID of the commit used to establish the before of the comparison. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // An array of comment objects. Each comment object contains information about @@ -160,7 +344,7 @@ type CommentsForComparedCommit struct { // Location information about the comment on the comparison, including the file // name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' or 'AFTER'. + // was made is BEFORE or AFTER. Location *Location `locationName:"location" type:"structure"` // The name of the repository that contains the compared commits. @@ -179,7 +363,7 @@ type CommentsForPullRequest struct { // The full blob ID of the file on which you want to comment on the source commit. AfterBlobId *string `locationName:"afterBlobId" type:"string"` - // he full commit ID of the commit that was the tip of the source branch at + // The full commit ID of the commit that was the tip of the source branch at // the time the comment was made. AfterCommitId *string `locationName:"afterCommitId" type:"string"` @@ -188,9 +372,9 @@ type CommentsForPullRequest struct { BeforeBlobId *string `locationName:"beforeBlobId" type:"string"` // The full commit ID of the commit that was the tip of the destination branch - // when the pull request was created. This commit will be superceded by the - // after commit in the source branch when and if you merge the source branch - // into the destination branch. + // when the pull request was created. This commit is superceded by the after + // commit in the source branch when and if you merge the source branch into + // the destination branch. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` // An array of comment objects. Each comment object contains information about @@ -199,7 +383,7 @@ type CommentsForPullRequest struct { // Location information about the comment on the pull request, including the // file name, line number, and whether the version of the file where the comment - // was made is 'BEFORE' (destination branch) or 'AFTER' (source branch). + // was made is BEFORE (destination branch) or AFTER (source branch). Location *Location `locationName:"location" type:"structure"` // The system-generated ID of the pull request. @@ -218,7 +402,7 @@ func (s CommentsForPullRequest) String() string { type Commit struct { _ struct{} `type:"structure"` - // Any additional data associated with the specified commit. 
+ // Any other data associated with the specified commit. AdditionalData *string `locationName:"additionalData" type:"string"` // Information about the author of the specified commit. Information includes @@ -226,7 +410,7 @@ type Commit struct { // the email address for the author, as configured in Git. Author *UserInfo `locationName:"author" type:"structure"` - // The full SHA of the specified commit. + // The full SHA ID of the specified commit. CommitId *string `locationName:"commitId" type:"string"` // Information about the person who committed the specified commit, also known @@ -317,18 +501,18 @@ func (s ConflictMetadata) String() string { return awsutil.Prettify(s) } -// A list of inputs to use when resolving conflicts during a merge if AUTOMERGE -// is chosen as the conflict resolution strategy. +// If AUTOMERGE is the conflict resolution strategy, a list of inputs to use +// when resolving conflicts during a merge. type ConflictResolution struct { _ struct{} `type:"structure"` - // Files that will be deleted as part of the merge conflict resolution. + // Files to be deleted as part of the merge conflict resolution. DeleteFiles []DeleteFileEntry `locationName:"deleteFiles" type:"list"` - // Files that will have content replaced as part of the merge conflict resolution. + // Files to have content replaced as part of the merge conflict resolution. ReplaceContents []ReplaceContentEntry `locationName:"replaceContents" type:"list"` - // File modes that will be set as part of the merge conflict resolution. + // File modes that are set as part of the merge conflict resolution. SetFileModes []SetFileModeEntry `locationName:"setFileModes" type:"list"` } @@ -368,12 +552,11 @@ func (s *ConflictResolution) Validate() error { return nil } -// A file that will be deleted as part of a commit. +// A file that is deleted as part of a commit. type DeleteFileEntry struct { _ struct{} `type:"structure"` - // The full path of the file that will be deleted, including the name of the - // file. + // The full path of the file to be deleted, including the name of the file. // // FilePath is a required field FilePath *string `locationName:"filePath" type:"string" required:"true"` @@ -420,11 +603,35 @@ func (s Difference) String() string { return awsutil.Prettify(s) } +// Returns information about the approval rules applied to a pull request and +// whether conditions have been met. +type Evaluation struct { + _ struct{} `type:"structure"` + + // The names of the approval rules that have not had their conditions met. + ApprovalRulesNotSatisfied []string `locationName:"approvalRulesNotSatisfied" type:"list"` + + // The names of the approval rules that have had their conditions met. + ApprovalRulesSatisfied []string `locationName:"approvalRulesSatisfied" type:"list"` + + // Whether the state of the pull request is approved. + Approved *bool `locationName:"approved" type:"boolean"` + + // Whether the approval rule requirements for the pull request have been overridden + // and no longer need to be met. + Overridden *bool `locationName:"overridden" type:"boolean"` +} + +// String returns the string representation +func (s Evaluation) String() string { + return awsutil.Prettify(s) +} + // Returns information about a file in a repository. type File struct { _ struct{} `type:"structure"` - // The fully-qualified path to the file in the repository. + // The fully qualified path to the file in the repository. 
AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the file information. @@ -443,12 +650,12 @@ func (s File) String() string { return awsutil.Prettify(s) } -// A file that will be added, updated, or deleted as part of a commit. +// A file to be added, updated, or deleted as part of a commit. type FileMetadata struct { _ struct{} `type:"structure"` - // The full path to the file that will be added or updated, including the name - // of the file. + // The full path to the file to be added or updated, including the name of the + // file. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the file information. @@ -506,7 +713,7 @@ func (s FileSizes) String() string { type Folder struct { _ struct{} `type:"structure"` - // The fully-qualified path of the folder in the repository. + // The fully qualified path of the folder in the repository. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The relative path of the specified folder from the folder where the query @@ -555,11 +762,11 @@ type Location struct { // if any. FilePath *string `locationName:"filePath" type:"string"` - // The position of a change within a compared file, in line number format. + // The position of a change in a compared file, in line number format. FilePosition *int64 `locationName:"filePosition" type:"long"` // In a comparison of commits or a pull request, whether the change is in the - // 'before' or 'after' of that comparison. + // before or after of that comparison. RelativeFileVersion RelativeFileVersionEnum `locationName:"relativeFileVersion" type:"string" enum:"true"` } @@ -581,9 +788,9 @@ type MergeHunk struct { // A Boolean value indicating whether a combination of hunks contains a conflict. // Conflicts occur when the same file or the same lines in a file were modified // in both the source and destination of a merge or pull request. Valid values - // include true, false, and null. This will be true when the hunk represents - // a conflict and one or more files contains a line conflict. File mode conflicts - // in a merge will not set this to be true. + // include true, false, and null. True when the hunk represents a conflict and + // one or more files contains a line conflict. File mode conflicts in a merge + // do not set this to true. IsConflict *bool `locationName:"isConflict" type:"boolean"` // Information about the merge hunk in the source of a merge or pull request. @@ -603,8 +810,8 @@ type MergeHunkDetail struct { // The end position of the hunk in the merge result. EndLine *int64 `locationName:"endLine" type:"integer"` - // The base-64 encoded content of the hunk merged region that might or might - // not contain a conflict. + // The base-64 encoded content of the hunk merged region that might contain + // a conflict. HunkContent *string `locationName:"hunkContent" type:"string"` // The start position of the hunk in the merge result. @@ -646,8 +853,8 @@ type MergeOperations struct { // The operation on a file in the destination of a merge or pull request. Destination ChangeTypeEnum `locationName:"destination" type:"string" enum:"true"` - // The operation on a file (add, modify, or delete) of a file in the source - // of a merge or pull request. + // The operation (add, modify, or delete) on a file in the source of a merge + // or pull request. 
Source ChangeTypeEnum `locationName:"source" type:"string" enum:"true"` } @@ -675,17 +882,37 @@ func (s ObjectTypes) String() string { return awsutil.Prettify(s) } +// Returns information about the template that created the approval rule for +// a pull request. +type OriginApprovalRuleTemplate struct { + _ struct{} `type:"structure"` + + // The ID of the template that created the approval rule. + ApprovalRuleTemplateId *string `locationName:"approvalRuleTemplateId" type:"string"` + + // The name of the template that created the approval rule. + ApprovalRuleTemplateName *string `locationName:"approvalRuleTemplateName" min:"1" type:"string"` +} + +// String returns the string representation +func (s OriginApprovalRuleTemplate) String() string { + return awsutil.Prettify(s) +} + // Returns information about a pull request. type PullRequest struct { _ struct{} `type:"structure"` + // The approval rules applied to the pull request. + ApprovalRules []ApprovalRule `locationName:"approvalRules" type:"list"` + // The Amazon Resource Name (ARN) of the user who created the pull request. AuthorArn *string `locationName:"authorArn" type:"string"` - // A unique, client-generated idempotency token that when provided in a request, + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request - // will return information about the initial request that used that token. + // returns information about the initial request that used that token. ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` // The date and time the pull request was originally created, in timestamp format. @@ -710,8 +937,11 @@ type PullRequest struct { // branch for the pull request. PullRequestTargets []PullRequestTarget `locationName:"pullRequestTargets" type:"list"` + // The system-generated revision ID for the pull request. + RevisionId *string `locationName:"revisionId" type:"string"` + // The user-defined title of the pull request. This title is displayed in the - // list of pull requests to other users of the repository. + // list of pull requests to other repository users. Title *string `locationName:"title" type:"string"` } @@ -750,18 +980,27 @@ type PullRequestEvent struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the user whose actions resulted in the - // event. Examples include updating the pull request with additional commits - // or changing the status of a pull request. + // event. Examples include updating the pull request with more commits or changing + // the status of a pull request. ActorArn *string `locationName:"actorArn" type:"string"` + // Information about a pull request event. + ApprovalRuleEventMetadata *ApprovalRuleEventMetadata `locationName:"approvalRuleEventMetadata" type:"structure"` + + // Information about an approval rule override event for a pull request. + ApprovalRuleOverriddenEventMetadata *ApprovalRuleOverriddenEventMetadata `locationName:"approvalRuleOverriddenEventMetadata" type:"structure"` + + // Information about an approval state change for a pull request. + ApprovalStateChangedEventMetadata *ApprovalStateChangedEventMetadata `locationName:"approvalStateChangedEventMetadata" type:"structure"` + // The day and time of the pull request event, in timestamp format. 
EventDate *time.Time `locationName:"eventDate" type:"timestamp"` // Information about the source and destination branches for the pull request. PullRequestCreatedEventMetadata *PullRequestCreatedEventMetadata `locationName:"pullRequestCreatedEventMetadata" type:"structure"` - // The type of the pull request event, for example a status change event (PULL_REQUEST_STATUS_CHANGED) - // or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED). + // The type of the pull request event (for example, a status change event (PULL_REQUEST_STATUS_CHANGED) + // or update event (PULL_REQUEST_SOURCE_REFERENCE_UPDATED)). PullRequestEventType PullRequestEventType `locationName:"pullRequestEventType" type:"string" enum:"true"` // The system-generated ID of the pull request. @@ -787,7 +1026,7 @@ func (s PullRequestEvent) String() string { type PullRequestMergedStateChangedEventMetadata struct { _ struct{} `type:"structure"` - // The name of the branch that the pull request will be merged into. + // The name of the branch that the pull request is merged into. DestinationReference *string `locationName:"destinationReference" type:"string"` // Information about the merge state change event. @@ -848,8 +1087,8 @@ type PullRequestTarget struct { // commit where the pull request was or will be merged. DestinationCommit *string `locationName:"destinationCommit" type:"string"` - // The branch of the repository where the pull request changes will be merged - // into. Also known as the destination branch. + // The branch of the repository where the pull request changes are merged. Also + // known as the destination branch. DestinationReference *string `locationName:"destinationReference" type:"string"` // The commit ID of the most recent commit that the source branch and the destination @@ -866,7 +1105,7 @@ type PullRequestTarget struct { // The full commit ID of the tip of the source branch used to create the pull // request. If the pull request branch is updated by a push while the pull request - // is open, the commit ID will change to reflect the new tip of the branch. + // is open, the commit ID changes to reflect the new tip of the branch. SourceCommit *string `locationName:"sourceCommit" type:"string"` // The branch of the repository that contains the changes for the pull request. @@ -879,7 +1118,7 @@ func (s PullRequestTarget) String() string { return awsutil.Prettify(s) } -// Information about a file that will be added or updated as part of a commit. +// Information about a file added or updated as part of a commit. type PutFileEntry struct { _ struct{} `type:"structure"` @@ -1032,25 +1271,25 @@ func (s RepositoryNameIdPair) String() string { type RepositoryTrigger struct { _ struct{} `type:"structure"` - // The branches that will be included in the trigger configuration. If you specify - // an empty array, the trigger will apply to all branches. + // The branches to be included in the trigger configuration. If you specify + // an empty array, the trigger applies to all branches. // // Although no content is required in the array, you must include the array // itself. Branches []string `locationName:"branches" type:"list"` - // Any custom data associated with the trigger that will be included in the - // information sent to the target of the trigger. + // Any custom data associated with the trigger to be included in the information + // sent to the target of the trigger. CustomData *string `locationName:"customData" type:"string"` - // The ARN of the resource that is the target for a trigger. 
For example, the - // ARN of a topic in Amazon SNS. + // The ARN of the resource that is the target for a trigger (for example, the + // ARN of a topic in Amazon SNS). // // DestinationArn is a required field DestinationArn *string `locationName:"destinationArn" type:"string" required:"true"` - // The repository events that will cause the trigger to run actions in another - // service, such as sending a notification through Amazon SNS. + // The repository events that cause the trigger to run actions in another service, + // such as sending a notification through Amazon SNS. // // The valid value "all" cannot be used with any other values. // @@ -1094,7 +1333,7 @@ func (s *RepositoryTrigger) Validate() error { type RepositoryTriggerExecutionFailure struct { _ struct{} `type:"structure"` - // Additional message information about the trigger that did not run. + // Message information about the trigger that did not run. FailureMessage *string `locationName:"failureMessage" type:"string"` // The name of the trigger that did not run. @@ -1199,7 +1438,7 @@ func (s SubModule) String() string { type SymbolicLink struct { _ struct{} `type:"structure"` - // The fully-qualified path to the folder that contains the symbolic link. + // The fully qualified path to the folder that contains the symbolic link. AbsolutePath *string `locationName:"absolutePath" type:"string"` // The blob ID that contains the information about the symbolic link. @@ -1222,8 +1461,8 @@ func (s SymbolicLink) String() string { type Target struct { _ struct{} `type:"structure"` - // The branch of the repository where the pull request changes will be merged - // into. Also known as the destination branch. + // The branch of the repository where the pull request changes are merged. Also + // known as the destination branch. DestinationReference *string `locationName:"destinationReference" type:"string"` // The name of the repository that contains the pull request. diff --git a/service/codecommit/codecommitiface/interface.go b/service/codecommit/codecommitiface/interface.go index 1c4ba71d686..95335b318d6 100644 --- a/service/codecommit/codecommitiface/interface.go +++ b/service/codecommit/codecommitiface/interface.go @@ -23,7 +23,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // CodeCommit. // func myFunc(svc codecommitiface.ClientAPI) bool { -// // Make svc.BatchDescribeMergeConflicts request +// // Make svc.AssociateApprovalRuleTemplateWithRepository request // } // // func main() { @@ -43,7 +43,7 @@ import ( // type mockClientClient struct { // codecommitiface.ClientPI // } -// func (m *mockClientClient) BatchDescribeMergeConflicts(input *codecommit.BatchDescribeMergeConflictsInput) (*codecommit.BatchDescribeMergeConflictsOutput, error) { +// func (m *mockClientClient) AssociateApprovalRuleTemplateWithRepository(input *codecommit.AssociateApprovalRuleTemplateWithRepositoryInput) (*codecommit.AssociateApprovalRuleTemplateWithRepositoryOutput, error) { // // mock response/functionality // } // @@ -61,34 +61,54 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
type ClientAPI interface { + AssociateApprovalRuleTemplateWithRepositoryRequest(*codecommit.AssociateApprovalRuleTemplateWithRepositoryInput) codecommit.AssociateApprovalRuleTemplateWithRepositoryRequest + + BatchAssociateApprovalRuleTemplateWithRepositoriesRequest(*codecommit.BatchAssociateApprovalRuleTemplateWithRepositoriesInput) codecommit.BatchAssociateApprovalRuleTemplateWithRepositoriesRequest + BatchDescribeMergeConflictsRequest(*codecommit.BatchDescribeMergeConflictsInput) codecommit.BatchDescribeMergeConflictsRequest + BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest(*codecommit.BatchDisassociateApprovalRuleTemplateFromRepositoriesInput) codecommit.BatchDisassociateApprovalRuleTemplateFromRepositoriesRequest + BatchGetCommitsRequest(*codecommit.BatchGetCommitsInput) codecommit.BatchGetCommitsRequest BatchGetRepositoriesRequest(*codecommit.BatchGetRepositoriesInput) codecommit.BatchGetRepositoriesRequest + CreateApprovalRuleTemplateRequest(*codecommit.CreateApprovalRuleTemplateInput) codecommit.CreateApprovalRuleTemplateRequest + CreateBranchRequest(*codecommit.CreateBranchInput) codecommit.CreateBranchRequest CreateCommitRequest(*codecommit.CreateCommitInput) codecommit.CreateCommitRequest CreatePullRequestRequest(*codecommit.CreatePullRequestInput) codecommit.CreatePullRequestRequest + CreatePullRequestApprovalRuleRequest(*codecommit.CreatePullRequestApprovalRuleInput) codecommit.CreatePullRequestApprovalRuleRequest + CreateRepositoryRequest(*codecommit.CreateRepositoryInput) codecommit.CreateRepositoryRequest CreateUnreferencedMergeCommitRequest(*codecommit.CreateUnreferencedMergeCommitInput) codecommit.CreateUnreferencedMergeCommitRequest + DeleteApprovalRuleTemplateRequest(*codecommit.DeleteApprovalRuleTemplateInput) codecommit.DeleteApprovalRuleTemplateRequest + DeleteBranchRequest(*codecommit.DeleteBranchInput) codecommit.DeleteBranchRequest DeleteCommentContentRequest(*codecommit.DeleteCommentContentInput) codecommit.DeleteCommentContentRequest DeleteFileRequest(*codecommit.DeleteFileInput) codecommit.DeleteFileRequest + DeletePullRequestApprovalRuleRequest(*codecommit.DeletePullRequestApprovalRuleInput) codecommit.DeletePullRequestApprovalRuleRequest + DeleteRepositoryRequest(*codecommit.DeleteRepositoryInput) codecommit.DeleteRepositoryRequest DescribeMergeConflictsRequest(*codecommit.DescribeMergeConflictsInput) codecommit.DescribeMergeConflictsRequest DescribePullRequestEventsRequest(*codecommit.DescribePullRequestEventsInput) codecommit.DescribePullRequestEventsRequest + DisassociateApprovalRuleTemplateFromRepositoryRequest(*codecommit.DisassociateApprovalRuleTemplateFromRepositoryInput) codecommit.DisassociateApprovalRuleTemplateFromRepositoryRequest + + EvaluatePullRequestApprovalRulesRequest(*codecommit.EvaluatePullRequestApprovalRulesInput) codecommit.EvaluatePullRequestApprovalRulesRequest + + GetApprovalRuleTemplateRequest(*codecommit.GetApprovalRuleTemplateInput) codecommit.GetApprovalRuleTemplateRequest + GetBlobRequest(*codecommit.GetBlobInput) codecommit.GetBlobRequest GetBranchRequest(*codecommit.GetBranchInput) codecommit.GetBranchRequest @@ -115,16 +135,26 @@ type ClientAPI interface { GetPullRequestRequest(*codecommit.GetPullRequestInput) codecommit.GetPullRequestRequest + GetPullRequestApprovalStatesRequest(*codecommit.GetPullRequestApprovalStatesInput) codecommit.GetPullRequestApprovalStatesRequest + + GetPullRequestOverrideStateRequest(*codecommit.GetPullRequestOverrideStateInput) codecommit.GetPullRequestOverrideStateRequest + 
GetRepositoryRequest(*codecommit.GetRepositoryInput) codecommit.GetRepositoryRequest GetRepositoryTriggersRequest(*codecommit.GetRepositoryTriggersInput) codecommit.GetRepositoryTriggersRequest + ListApprovalRuleTemplatesRequest(*codecommit.ListApprovalRuleTemplatesInput) codecommit.ListApprovalRuleTemplatesRequest + + ListAssociatedApprovalRuleTemplatesForRepositoryRequest(*codecommit.ListAssociatedApprovalRuleTemplatesForRepositoryInput) codecommit.ListAssociatedApprovalRuleTemplatesForRepositoryRequest + ListBranchesRequest(*codecommit.ListBranchesInput) codecommit.ListBranchesRequest ListPullRequestsRequest(*codecommit.ListPullRequestsInput) codecommit.ListPullRequestsRequest ListRepositoriesRequest(*codecommit.ListRepositoriesInput) codecommit.ListRepositoriesRequest + ListRepositoriesForApprovalRuleTemplateRequest(*codecommit.ListRepositoriesForApprovalRuleTemplateInput) codecommit.ListRepositoriesForApprovalRuleTemplateRequest + ListTagsForResourceRequest(*codecommit.ListTagsForResourceInput) codecommit.ListTagsForResourceRequest MergeBranchesByFastForwardRequest(*codecommit.MergeBranchesByFastForwardInput) codecommit.MergeBranchesByFastForwardRequest @@ -139,6 +169,8 @@ type ClientAPI interface { MergePullRequestByThreeWayRequest(*codecommit.MergePullRequestByThreeWayInput) codecommit.MergePullRequestByThreeWayRequest + OverridePullRequestApprovalRulesRequest(*codecommit.OverridePullRequestApprovalRulesInput) codecommit.OverridePullRequestApprovalRulesRequest + PostCommentForComparedCommitRequest(*codecommit.PostCommentForComparedCommitInput) codecommit.PostCommentForComparedCommitRequest PostCommentForPullRequestRequest(*codecommit.PostCommentForPullRequestInput) codecommit.PostCommentForPullRequestRequest @@ -155,10 +187,20 @@ type ClientAPI interface { UntagResourceRequest(*codecommit.UntagResourceInput) codecommit.UntagResourceRequest + UpdateApprovalRuleTemplateContentRequest(*codecommit.UpdateApprovalRuleTemplateContentInput) codecommit.UpdateApprovalRuleTemplateContentRequest + + UpdateApprovalRuleTemplateDescriptionRequest(*codecommit.UpdateApprovalRuleTemplateDescriptionInput) codecommit.UpdateApprovalRuleTemplateDescriptionRequest + + UpdateApprovalRuleTemplateNameRequest(*codecommit.UpdateApprovalRuleTemplateNameInput) codecommit.UpdateApprovalRuleTemplateNameRequest + UpdateCommentRequest(*codecommit.UpdateCommentInput) codecommit.UpdateCommentRequest UpdateDefaultBranchRequest(*codecommit.UpdateDefaultBranchInput) codecommit.UpdateDefaultBranchRequest + UpdatePullRequestApprovalRuleContentRequest(*codecommit.UpdatePullRequestApprovalRuleContentInput) codecommit.UpdatePullRequestApprovalRuleContentRequest + + UpdatePullRequestApprovalStateRequest(*codecommit.UpdatePullRequestApprovalStateInput) codecommit.UpdatePullRequestApprovalStateRequest + UpdatePullRequestDescriptionRequest(*codecommit.UpdatePullRequestDescriptionInput) codecommit.UpdatePullRequestDescriptionRequest UpdatePullRequestStatusRequest(*codecommit.UpdatePullRequestStatusInput) codecommit.UpdatePullRequestStatusRequest diff --git a/service/cognitoidentityprovider/api_enums.go b/service/cognitoidentityprovider/api_enums.go index 62449915abf..a831fa10a37 100644 --- a/service/cognitoidentityprovider/api_enums.go +++ b/service/cognitoidentityprovider/api_enums.go @@ -80,12 +80,13 @@ type AuthFlowType string // Enum values for AuthFlowType const ( - AuthFlowTypeUserSrpAuth AuthFlowType = "USER_SRP_AUTH" - AuthFlowTypeRefreshTokenAuth AuthFlowType = "REFRESH_TOKEN_AUTH" - AuthFlowTypeRefreshToken AuthFlowType 
= "REFRESH_TOKEN" - AuthFlowTypeCustomAuth AuthFlowType = "CUSTOM_AUTH" - AuthFlowTypeAdminNoSrpAuth AuthFlowType = "ADMIN_NO_SRP_AUTH" - AuthFlowTypeUserPasswordAuth AuthFlowType = "USER_PASSWORD_AUTH" + AuthFlowTypeUserSrpAuth AuthFlowType = "USER_SRP_AUTH" + AuthFlowTypeRefreshTokenAuth AuthFlowType = "REFRESH_TOKEN_AUTH" + AuthFlowTypeRefreshToken AuthFlowType = "REFRESH_TOKEN" + AuthFlowTypeCustomAuth AuthFlowType = "CUSTOM_AUTH" + AuthFlowTypeAdminNoSrpAuth AuthFlowType = "ADMIN_NO_SRP_AUTH" + AuthFlowTypeUserPasswordAuth AuthFlowType = "USER_PASSWORD_AUTH" + AuthFlowTypeAdminUserPasswordAuth AuthFlowType = "ADMIN_USER_PASSWORD_AUTH" ) func (enum AuthFlowType) MarshalValue() (string, error) { @@ -318,9 +319,14 @@ type ExplicitAuthFlowsType string // Enum values for ExplicitAuthFlowsType const ( - ExplicitAuthFlowsTypeAdminNoSrpAuth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" - ExplicitAuthFlowsTypeCustomAuthFlowOnly ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" - ExplicitAuthFlowsTypeUserPasswordAuth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAdminNoSrpAuth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" + ExplicitAuthFlowsTypeCustomAuthFlowOnly ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" + ExplicitAuthFlowsTypeUserPasswordAuth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_ADMIN_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowCustomAuth ExplicitAuthFlowsType = "ALLOW_CUSTOM_AUTH" + ExplicitAuthFlowsTypeAllowUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowUserSrpAuth ExplicitAuthFlowsType = "ALLOW_USER_SRP_AUTH" + ExplicitAuthFlowsTypeAllowRefreshTokenAuth ExplicitAuthFlowsType = "ALLOW_REFRESH_TOKEN_AUTH" ) func (enum ExplicitAuthFlowsType) MarshalValue() (string, error) { @@ -404,6 +410,23 @@ func (enum OAuthFlowType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type PreventUserExistenceErrorTypes string + +// Enum values for PreventUserExistenceErrorTypes +const ( + PreventUserExistenceErrorTypesLegacy PreventUserExistenceErrorTypes = "LEGACY" + PreventUserExistenceErrorTypesEnabled PreventUserExistenceErrorTypes = "ENABLED" +) + +func (enum PreventUserExistenceErrorTypes) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PreventUserExistenceErrorTypes) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type RiskDecisionType string // Enum values for RiskDecisionType diff --git a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go index 6e1a4b1caf9..7f64b89b3de 100644 --- a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go @@ -48,6 +48,11 @@ type AdminInitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. 
+ // // AuthFlow is a required field AuthFlow AuthFlowType `type:"string" required:"true" enum:"true"` diff --git a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go index a41afc92e7d..18602f022d1 100644 --- a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go +++ b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go @@ -27,13 +27,13 @@ type ConfirmForgotPasswordInput struct { // // You create custom workflows by assigning AWS Lambda functions to user pool // triggers. When you use the ConfirmForgotPassword API action, Amazon Cognito - // invokes the functions that are assigned to the post confirmation and pre - // mutation triggers. When Amazon Cognito invokes either of these functions, - // it passes a JSON payload, which the function receives as input. This payload - // contains a clientMetadata attribute, which provides the data that you assigned - // to the ClientMetadata parameter in your ConfirmForgotPassword request. In - // your function code in AWS Lambda, you can process the clientMetadata value - // to enhance your workflow for your specific needs. + // invokes the function that is assigned to the post confirmation trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your ConfirmForgotPassword request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. // // For more information, see Customizing User Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) diff --git a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go index e6d83fe2769..9787afe08ec 100644 --- a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go @@ -75,7 +75,28 @@ type CreateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // The explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password protocol) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. 
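A short sketch of opting a new app client into the ALLOW_-prefixed flows described above. The enum constants are the ones added in this diff, while `UserPoolId` and `ClientName` are assumed field names on CreateUserPoolClientInput and the client is assumed to have been constructed elsewhere.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
)

// createClientWithAllowFlows creates an app client that opts in to the new
// ALLOW_-prefixed auth flows. Mixing ALLOW_ values with the old un-prefixed
// values is rejected, per the field documentation above.
func createClientWithAllowFlows(ctx context.Context, svc *cognitoidentityprovider.Client, userPoolID string) (*cognitoidentityprovider.CreateUserPoolClientResponse, error) {
	req := svc.CreateUserPoolClientRequest(&cognitoidentityprovider.CreateUserPoolClientInput{
		UserPoolId: aws.String(userPoolID),      // assumed field name, not shown in this hunk
		ClientName: aws.String("my-app-client"), // assumed field name, not shown in this hunk
		ExplicitAuthFlows: []cognitoidentityprovider.ExplicitAuthFlowsType{
			cognitoidentityprovider.ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth,
			cognitoidentityprovider.ExplicitAuthFlowsTypeAllowUserSrpAuth,
			cognitoidentityprovider.ExplicitAuthFlowsTypeAllowRefreshTokenAuth,
		},
	})
	return req.Send(ctx)
}
```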
ExplicitAuthFlows []ExplicitAuthFlowsType `type:"list"` // Boolean to specify whether you want to generate a secret for the user pool @@ -85,6 +106,44 @@ type CreateUserPoolClientInput struct { // A list of allowed logout URLs for the identity providers. LogoutURLs []string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException exception + // if the user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors PreventUserExistenceErrorTypes `type:"string" enum:"true"` + // The read attributes. ReadAttributes []string `type:"list"` diff --git a/service/cognitoidentityprovider/api_op_InitiateAuth.go b/service/cognitoidentityprovider/api_op_InitiateAuth.go index 245b8c202f6..da1001b45d1 100644 --- a/service/cognitoidentityprovider/api_op_InitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_InitiateAuth.go @@ -44,6 +44,11 @@ type InitiateAuthInput struct { // will invoke the user migration Lambda if the USERNAME is not found in // the user pool. // + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password authentication. + // This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this flow, + // Cognito receives the password in the request instead of using the SRP + // process to verify passwords. + // // ADMIN_NO_SRP_AUTH is not a valid value. // // AuthFlow is a required field diff --git a/service/cognitoidentityprovider/api_op_UpdateGroup.go b/service/cognitoidentityprovider/api_op_UpdateGroup.go index 22487415726..a7df89d9a68 100644 --- a/service/cognitoidentityprovider/api_op_UpdateGroup.go +++ b/service/cognitoidentityprovider/api_op_UpdateGroup.go @@ -87,6 +87,9 @@ const opUpdateGroup = "UpdateGroup" // // Calling this action requires developer credentials. // +// If you don't provide a value for an attribute, it will be set to the default +// value. +// // // Example sending a request using UpdateGroupRequest. 
// req := client.UpdateGroupRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go index 7f22b81913b..b1bd2989329 100644 --- a/service/cognitoidentityprovider/api_op_UpdateResourceServer.go +++ b/service/cognitoidentityprovider/api_op_UpdateResourceServer.go @@ -96,6 +96,9 @@ const opUpdateResourceServer = "UpdateResourceServer" // // Updates the name and scopes of resource server. All other fields are read-only. // +// If you don't provide a value for an attribute, it will be set to the default +// value. +// // // Example sending a request using UpdateResourceServerRequest. // req := client.UpdateResourceServerRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go index a47ae0afc7c..a4a963f0c84 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go @@ -24,13 +24,13 @@ type UpdateUserAttributesInput struct { // // You create custom workflows by assigning AWS Lambda functions to user pool // triggers. When you use the UpdateUserAttributes API action, Amazon Cognito - // invokes the functions that are assigned to the custom message and pre mutation - // triggers. When Amazon Cognito invokes either of these functions, it passes - // a JSON payload, which the function receives as input. This payload contains - // a clientMetadata attribute, which provides the data that you assigned to - // the ClientMetadata parameter in your UpdateUserAttributes request. In your - // function code in AWS Lambda, you can process the clientMetadata value to - // enhance your workflow for your specific needs. + // invokes the function that is assigned to the custom message trigger. When + // Amazon Cognito invokes this function, it passes a JSON payload, which the + // function receives as input. This payload contains a clientMetadata attribute, + // which provides the data that you assigned to the ClientMetadata parameter + // in your UpdateUserAttributes request. In your function code in AWS Lambda, + // you can process the clientMetadata value to enhance your workflow for your + // specific needs. // // For more information, see Customizing User Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPool.go b/service/cognitoidentityprovider/api_op_UpdateUserPool.go index 1c6f2b3070c..9c3cca5c36d 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPool.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPool.go @@ -162,9 +162,11 @@ const opUpdateUserPool = "UpdateUserPool" // UpdateUserPoolRequest returns a request value for making API operation for // Amazon Cognito Identity Provider. // -// Updates the specified user pool with the specified attributes. If you don't -// provide a value for an attribute, it will be set to the default value. You -// can get a list of the current user pool settings with . +// Updates the specified user pool with the specified attributes. You can get +// a list of the current user pool settings with . +// +// If you don't provide a value for an attribute, it will be set to the default +// value. 
// // // Example sending a request using UpdateUserPoolRequest. // req := client.UpdateUserPoolRequest(params) diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go index 9ec6ca77b5b..6cf125aeca8 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go @@ -75,12 +75,71 @@ type UpdateUserPoolClientInput struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // Explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. + // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password protocol) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []ExplicitAuthFlowsType `type:"list"` // A list of allowed logout URLs for the identity providers. LogoutURLs []string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException exception + // if the user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors PreventUserExistenceErrorTypes `type:"string" enum:"true"` + // The read-only attributes of the user pool. ReadAttributes []string `type:"list"` @@ -163,8 +222,10 @@ const opUpdateUserPoolClient = "UpdateUserPoolClient" // Amazon Cognito Identity Provider. 
// // Updates the specified user pool app client with the specified attributes. +// You can get a list of the current user pool app client settings with . +// // If you don't provide a value for an attribute, it will be set to the default -// value. You can get a list of the current user pool app client settings with . +// value. // // // Example sending a request using UpdateUserPoolClientRequest. // req := client.UpdateUserPoolClientRequest(params) diff --git a/service/cognitoidentityprovider/api_types.go b/service/cognitoidentityprovider/api_types.go index eeee49e3dd5..a947c38e3a3 100644 --- a/service/cognitoidentityprovider/api_types.go +++ b/service/cognitoidentityprovider/api_types.go @@ -654,6 +654,22 @@ func (s DomainDescriptionType) String() string { type EmailConfigurationType struct { _ struct{} `type:"structure"` + // The set of configuration rules that can be applied to emails sent using Amazon + // SES. A configuration set is applied to an email by including a reference + // to the configuration set in the headers of the email. Once applied, all of + // the rules in that configuration set are applied to the email. Configuration + // sets can be used to apply the following types of rules to emails: + // + // * Event publishing – Amazon SES can track the number of send, delivery, + // open, click, bounce, and complaint events for each email sent. Use event + // publishing to send information about these events to other AWS services + // such as SNS and CloudWatch. + // + // * IP pool management – When leasing dedicated IP addresses with Amazon + // SES, you can create groups of IP addresses, called dedicated IP pools. + // You can then associate the dedicated IP pools with configuration sets. + ConfigurationSet *string `min:"1" type:"string"` + // Specifies whether Amazon Cognito emails your users by using its built-in // email functionality or your Amazon SES email configuration. Specify one of // the following values: @@ -696,6 +712,11 @@ type EmailConfigurationType struct { // in the Amazon Cognito Developer Guide. EmailSendingAccount EmailSendingAccountType `type:"string" enum:"true"` + // Identifies either the sender’s email address or the sender’s name with + // their email address. For example, testuser@example.com or Test User . + // This address will appear before the body of the email. + From *string `type:"string"` + // The destination to which the receiver of the email should reply to. ReplyToEmailAddress *string `type:"string"` @@ -720,6 +741,9 @@ func (s EmailConfigurationType) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *EmailConfigurationType) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "EmailConfigurationType"} + if s.ConfigurationSet != nil && len(*s.ConfigurationSet) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConfigurationSet", 1)) + } if s.SourceArn != nil && len(*s.SourceArn) < 20 { invalidParams.Add(aws.NewErrParamMinLen("SourceArn", 20)) } @@ -1847,7 +1871,28 @@ type UserPoolClientType struct { // App callback URLs such as myapp://example are also supported. DefaultRedirectURI *string `min:"1" type:"string"` - // The explicit authentication flows. + // The authentication flows that are supported by the user pool clients. Flow + // names without the ALLOW_ prefix are deprecated in favor of new names with + // the ALLOW_ prefix. Note that values with ALLOW_ prefix cannot be used along + // with values without ALLOW_ prefix. 
+ // + // Valid values include: + // + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication + // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH + // setting. With this authentication flow, Cognito receives the password + // in the request instead of using the SRP (Secure Remote Password protocol) + // protocol to verify passwords. + // + // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. + // + // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. + // In this flow, Cognito receives the password in the request instead of + // using the SRP protocol to verify passwords. + // + // * ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []ExplicitAuthFlowsType `type:"list"` // The date the user pool client was last modified. @@ -1856,6 +1901,44 @@ type UserPoolClientType struct { // A list of allowed logout URLs for the identity providers. LogoutURLs []string `type:"list"` + // Use this setting to choose which errors and responses are returned by Cognito + // APIs during authentication, account confirmation, and password recovery when + // the user does not exist in the user pool. When set to ENABLED and the user + // does not exist, authentication returns an error indicating either the username + // or password was incorrect, and account confirmation and password recovery + // return a response indicating a code was sent to a simulated destination. + // When set to LEGACY, those APIs will return a UserNotFoundException exception + // if the user does not exist in the user pool. + // + // Valid values include: + // + // * ENABLED - This prevents user existence-related errors. + // + // * LEGACY - This represents the old behavior of Cognito where user existence + // related errors are not prevented. + // + // This setting affects the behavior of following APIs: + // + // * AdminInitiateAuth + // + // * AdminRespondToAuthChallenge + // + // * InitiateAuth + // + // * RespondToAuthChallenge + // + // * ForgotPassword + // + // * ConfirmForgotPassword + // + // * ConfirmSignUp + // + // * ResendConfirmationCode + // + // After January 1st 2020, the value of PreventUserExistenceErrors will default + // to ENABLED for newly created user pool clients if no value is provided. + PreventUserExistenceErrors PreventUserExistenceErrorTypes `type:"string" enum:"true"` + // The Read-only attributes. 
ReadAttributes []string `type:"list"` diff --git a/service/configservice/api_enums.go b/service/configservice/api_enums.go index 564cee97d9a..f058e81c094 100644 --- a/service/configservice/api_enums.go +++ b/service/configservice/api_enums.go @@ -129,6 +129,43 @@ func (enum ConfigurationItemStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type ConformancePackComplianceType string + +// Enum values for ConformancePackComplianceType +const ( + ConformancePackComplianceTypeCompliant ConformancePackComplianceType = "COMPLIANT" + ConformancePackComplianceTypeNonCompliant ConformancePackComplianceType = "NON_COMPLIANT" +) + +func (enum ConformancePackComplianceType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ConformancePackComplianceType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ConformancePackState string + +// Enum values for ConformancePackState +const ( + ConformancePackStateCreateInProgress ConformancePackState = "CREATE_IN_PROGRESS" + ConformancePackStateCreateComplete ConformancePackState = "CREATE_COMPLETE" + ConformancePackStateCreateFailed ConformancePackState = "CREATE_FAILED" + ConformancePackStateDeleteInProgress ConformancePackState = "DELETE_IN_PROGRESS" + ConformancePackStateDeleteFailed ConformancePackState = "DELETE_FAILED" +) + +func (enum ConformancePackState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ConformancePackState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type DeliveryStatus string // Enum values for DeliveryStatus @@ -190,12 +227,12 @@ const ( MemberAccountRuleStatusCreateSuccessful MemberAccountRuleStatus = "CREATE_SUCCESSFUL" MemberAccountRuleStatusCreateInProgress MemberAccountRuleStatus = "CREATE_IN_PROGRESS" MemberAccountRuleStatusCreateFailed MemberAccountRuleStatus = "CREATE_FAILED" - MemberAccountRuleStatusUpdateSuccessful MemberAccountRuleStatus = "UPDATE_SUCCESSFUL" - MemberAccountRuleStatusUpdateFailed MemberAccountRuleStatus = "UPDATE_FAILED" - MemberAccountRuleStatusUpdateInProgress MemberAccountRuleStatus = "UPDATE_IN_PROGRESS" MemberAccountRuleStatusDeleteSuccessful MemberAccountRuleStatus = "DELETE_SUCCESSFUL" MemberAccountRuleStatusDeleteFailed MemberAccountRuleStatus = "DELETE_FAILED" MemberAccountRuleStatusDeleteInProgress MemberAccountRuleStatus = "DELETE_IN_PROGRESS" + MemberAccountRuleStatusUpdateSuccessful MemberAccountRuleStatus = "UPDATE_SUCCESSFUL" + MemberAccountRuleStatusUpdateInProgress MemberAccountRuleStatus = "UPDATE_IN_PROGRESS" + MemberAccountRuleStatusUpdateFailed MemberAccountRuleStatus = "UPDATE_FAILED" ) func (enum MemberAccountRuleStatus) MarshalValue() (string, error) { @@ -244,6 +281,54 @@ func (enum OrganizationConfigRuleTriggerType) MarshalValueBuf(b []byte) ([]byte, return append(b, enum...), nil } +type OrganizationResourceDetailedStatus string + +// Enum values for OrganizationResourceDetailedStatus +const ( + OrganizationResourceDetailedStatusCreateSuccessful OrganizationResourceDetailedStatus = "CREATE_SUCCESSFUL" + OrganizationResourceDetailedStatusCreateInProgress OrganizationResourceDetailedStatus = "CREATE_IN_PROGRESS" + OrganizationResourceDetailedStatusCreateFailed OrganizationResourceDetailedStatus = "CREATE_FAILED" + OrganizationResourceDetailedStatusDeleteSuccessful OrganizationResourceDetailedStatus = "DELETE_SUCCESSFUL" + OrganizationResourceDetailedStatusDeleteFailed 
OrganizationResourceDetailedStatus = "DELETE_FAILED" + OrganizationResourceDetailedStatusDeleteInProgress OrganizationResourceDetailedStatus = "DELETE_IN_PROGRESS" + OrganizationResourceDetailedStatusUpdateSuccessful OrganizationResourceDetailedStatus = "UPDATE_SUCCESSFUL" + OrganizationResourceDetailedStatusUpdateInProgress OrganizationResourceDetailedStatus = "UPDATE_IN_PROGRESS" + OrganizationResourceDetailedStatusUpdateFailed OrganizationResourceDetailedStatus = "UPDATE_FAILED" +) + +func (enum OrganizationResourceDetailedStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OrganizationResourceDetailedStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type OrganizationResourceStatus string + +// Enum values for OrganizationResourceStatus +const ( + OrganizationResourceStatusCreateSuccessful OrganizationResourceStatus = "CREATE_SUCCESSFUL" + OrganizationResourceStatusCreateInProgress OrganizationResourceStatus = "CREATE_IN_PROGRESS" + OrganizationResourceStatusCreateFailed OrganizationResourceStatus = "CREATE_FAILED" + OrganizationResourceStatusDeleteSuccessful OrganizationResourceStatus = "DELETE_SUCCESSFUL" + OrganizationResourceStatusDeleteFailed OrganizationResourceStatus = "DELETE_FAILED" + OrganizationResourceStatusDeleteInProgress OrganizationResourceStatus = "DELETE_IN_PROGRESS" + OrganizationResourceStatusUpdateSuccessful OrganizationResourceStatus = "UPDATE_SUCCESSFUL" + OrganizationResourceStatusUpdateInProgress OrganizationResourceStatus = "UPDATE_IN_PROGRESS" + OrganizationResourceStatusUpdateFailed OrganizationResourceStatus = "UPDATE_FAILED" +) + +func (enum OrganizationResourceStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum OrganizationResourceStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type OrganizationRuleStatus string // Enum values for OrganizationRuleStatus @@ -251,12 +336,12 @@ const ( OrganizationRuleStatusCreateSuccessful OrganizationRuleStatus = "CREATE_SUCCESSFUL" OrganizationRuleStatusCreateInProgress OrganizationRuleStatus = "CREATE_IN_PROGRESS" OrganizationRuleStatusCreateFailed OrganizationRuleStatus = "CREATE_FAILED" - OrganizationRuleStatusUpdateSuccessful OrganizationRuleStatus = "UPDATE_SUCCESSFUL" - OrganizationRuleStatusUpdateFailed OrganizationRuleStatus = "UPDATE_FAILED" - OrganizationRuleStatusUpdateInProgress OrganizationRuleStatus = "UPDATE_IN_PROGRESS" OrganizationRuleStatusDeleteSuccessful OrganizationRuleStatus = "DELETE_SUCCESSFUL" OrganizationRuleStatusDeleteFailed OrganizationRuleStatus = "DELETE_FAILED" OrganizationRuleStatusDeleteInProgress OrganizationRuleStatus = "DELETE_IN_PROGRESS" + OrganizationRuleStatusUpdateSuccessful OrganizationRuleStatus = "UPDATE_SUCCESSFUL" + OrganizationRuleStatusUpdateInProgress OrganizationRuleStatus = "UPDATE_IN_PROGRESS" + OrganizationRuleStatusUpdateFailed OrganizationRuleStatus = "UPDATE_FAILED" ) func (enum OrganizationRuleStatus) MarshalValue() (string, error) { diff --git a/service/configservice/api_errors.go b/service/configservice/api_errors.go index 7ecd56b1bd9..82989cc735b 100644 --- a/service/configservice/api_errors.go +++ b/service/configservice/api_errors.go @@ -4,6 +4,12 @@ package configservice const ( + // ErrCodeConformancePackTemplateValidationException for service response error code + // "ConformancePackTemplateValidationException". + // + // You have specified a template that is not valid or supported. 
+ ErrCodeConformancePackTemplateValidationException = "ConformancePackTemplateValidationException" + // ErrCodeInsufficientDeliveryPolicyException for service response error code // "InsufficientDeliveryPolicyException". // @@ -21,9 +27,14 @@ const ( // * For PutConfigRule, the AWS Lambda function cannot be invoked. Check // the function ARN, and check the function's permissions. // - // * For OrganizationConfigRule, organization config rule cannot be created + // * For PutOrganizationConfigRule, organization config rule cannot be created // because you do not have permissions to call IAM GetRole action or create - // service linked role. + // a service linked role. + // + // * For PutConformancePack and PutOrganizationConformancePack, a conformance + // pack cannot be created becuase you do not have permissions: To call IAM + // GetRole action or create a service linked role. To read Amazon S3 bucket. + // To create a rule and a stack. ErrCodeInsufficientPermissionsException = "InsufficientPermissionsException" // ErrCodeInvalidConfigurationRecorderNameException for service response error code @@ -134,6 +145,13 @@ const ( // You have reached the limit of the number of recorders you can create. ErrCodeMaxNumberOfConfigurationRecordersExceededException = "MaxNumberOfConfigurationRecordersExceededException" + // ErrCodeMaxNumberOfConformancePacksExceededException for service response error code + // "MaxNumberOfConformancePacksExceededException". + // + // You have reached the limit (20) of the number of conformance packs in an + // account. + ErrCodeMaxNumberOfConformancePacksExceededException = "MaxNumberOfConformancePacksExceededException" + // ErrCodeMaxNumberOfDeliveryChannelsExceededException for service response error code // "MaxNumberOfDeliveryChannelsExceededException". // @@ -147,6 +165,13 @@ const ( // can create. ErrCodeMaxNumberOfOrganizationConfigRulesExceededException = "MaxNumberOfOrganizationConfigRulesExceededException" + // ErrCodeMaxNumberOfOrganizationConformancePacksExceededException for service response error code + // "MaxNumberOfOrganizationConformancePacksExceededException". + // + // You have reached the limit (10) of the number of organization conformance + // packs in an account. + ErrCodeMaxNumberOfOrganizationConformancePacksExceededException = "MaxNumberOfOrganizationConformancePacksExceededException" + // ErrCodeMaxNumberOfRetentionConfigurationsExceededException for service response error code // "MaxNumberOfRetentionConfigurationsExceededException". // @@ -192,6 +217,12 @@ const ( // rule names are correct and try again. ErrCodeNoSuchConfigRuleException = "NoSuchConfigRuleException" + // ErrCodeNoSuchConfigRuleInConformancePackException for service response error code + // "NoSuchConfigRuleInConformancePackException". + // + // AWS Config rule that you passed in the filter does not exist. + ErrCodeNoSuchConfigRuleInConformancePackException = "NoSuchConfigRuleInConformancePackException" + // ErrCodeNoSuchConfigurationAggregatorException for service response error code // "NoSuchConfigurationAggregatorException". // @@ -204,6 +235,12 @@ const ( // You have specified a configuration recorder that does not exist. ErrCodeNoSuchConfigurationRecorderException = "NoSuchConfigurationRecorderException" + // ErrCodeNoSuchConformancePackException for service response error code + // "NoSuchConformancePackException". + // + // You specified one or more conformance packs that do not exist. 
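Editor's note: the error codes added here are plain string constants. A small, illustrative sketch of matching the new conformance-pack codes with awserr; the pack name is supplied by the caller.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// describePack looks up one conformance pack and distinguishes the new
// conformance-pack error codes from other failures.
func describePack(ctx context.Context, client *configservice.Client, name string) error {
	req := client.DescribeConformancePacksRequest(&configservice.DescribeConformancePacksInput{
		ConformancePackNames: []string{name},
	})
	if _, err := req.Send(ctx); err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case configservice.ErrCodeNoSuchConformancePackException:
				fmt.Printf("conformance pack %q does not exist\n", name)
				return nil
			case configservice.ErrCodeResourceInUseException:
				fmt.Printf("conformance pack %q is still being created, updated, or deleted\n", name)
				return nil
			}
		}
		return err
	}
	return nil
}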
+ ErrCodeNoSuchConformancePackException = "NoSuchConformancePackException" + // ErrCodeNoSuchDeliveryChannelException for service response error code // "NoSuchDeliveryChannelException". // @@ -216,6 +253,16 @@ const ( // You specified one or more organization config rules that do not exist. ErrCodeNoSuchOrganizationConfigRuleException = "NoSuchOrganizationConfigRuleException" + // ErrCodeNoSuchOrganizationConformancePackException for service response error code + // "NoSuchOrganizationConformancePackException". + // + // AWS Config organization conformance pack that you passed in the filter does + // not exist. + // + // For DeleteOrganizationConformancePack, you tried to delete an organization + // conformance pack that does not exist. + ErrCodeNoSuchOrganizationConformancePackException = "NoSuchOrganizationConformancePackException" + // ErrCodeNoSuchRemediationConfigurationException for service response error code // "NoSuchRemediationConfigurationException". // @@ -240,9 +287,9 @@ const ( // For PutConfigAggregator API, no permission to call EnableAWSServiceAccess // API. // - // For all OrganizationConfigRule APIs, AWS Config throws an exception if APIs - // are called from member accounts. All APIs must be called from organization - // master account. + // For all OrganizationConfigRule and OrganizationConformancePack APIs, AWS + // Config throws an exception if APIs are called from member accounts. All APIs + // must be called from organization master account. ErrCodeOrganizationAccessDeniedException = "OrganizationAccessDeniedException" // ErrCodeOrganizationAllFeaturesNotEnabledException for service response error code @@ -252,6 +299,12 @@ const ( // have all features enabled. ErrCodeOrganizationAllFeaturesNotEnabledException = "OrganizationAllFeaturesNotEnabledException" + // ErrCodeOrganizationConformancePackTemplateValidationException for service response error code + // "OrganizationConformancePackTemplateValidationException". + // + // You have specified a template that is not valid or supported. + ErrCodeOrganizationConformancePackTemplateValidationException = "OrganizationConformancePackTemplateValidationException" + // ErrCodeOversizedConfigurationItemException for service response error code // "OversizedConfigurationItemException". // @@ -270,14 +323,14 @@ const ( // // You see this exception in the following cases: // - // * For DeleteConfigRule API, AWS Config is deleting this rule. Try your - // request again later. + // * For DeleteConfigRule, AWS Config is deleting this rule. Try your request + // again later. // - // * For DeleteConfigRule API, the rule is deleting your evaluation results. + // * For DeleteConfigRule, the rule is deleting your evaluation results. // Try your request again later. // - // * For DeleteConfigRule API, a remediation action is associated with the - // rule and AWS Config cannot delete this rule. Delete the remediation action + // * For DeleteConfigRule, a remediation action is associated with the rule + // and AWS Config cannot delete this rule. Delete the remediation action // associated with the rule before deleting the rule and try your request // again later. // @@ -286,6 +339,13 @@ const ( // // * For DeleteOrganizationConfigRule, organization config rule creation // is in progress. Try your request again later. + // + // * For PutConformancePack and PutOrganizationConformancePack, a conformance + // pack creation, update, and deletion is in progress. Try your request again + // later. 
+ // + // * For DeleteConformancePack, a conformance pack creation, update, and + // deletion is in progress. Try your request again later. ErrCodeResourceInUseException = "ResourceInUseException" // ErrCodeResourceNotDiscoveredException for service response error code diff --git a/service/configservice/api_op_DeleteConformancePack.go b/service/configservice/api_op_DeleteConformancePack.go new file mode 100644 index 00000000000..336fe50f7e3 --- /dev/null +++ b/service/configservice/api_op_DeleteConformancePack.go @@ -0,0 +1,126 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type DeleteConformancePackInput struct { + _ struct{} `type:"structure"` + + // Name of the conformance pack you want to delete. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConformancePackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConformancePackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteConformancePackInput"} + + if s.ConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConformancePackName")) + } + if s.ConformancePackName != nil && len(*s.ConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConformancePackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteConformancePackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConformancePackOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteConformancePack = "DeleteConformancePack" + +// DeleteConformancePackRequest returns a request value for making API operation for +// AWS Config. +// +// Deletes the specified conformance pack and all the AWS Config rules and all +// evaluation results within that conformance pack. +// +// AWS Config sets the conformance pack to DELETE_IN_PROGRESS until the deletion +// is complete. You cannot update a conformance pack while it is in this state. +// +// // Example sending a request using DeleteConformancePackRequest. +// req := client.DeleteConformancePackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteConformancePack +func (c *Client) DeleteConformancePackRequest(input *DeleteConformancePackInput) DeleteConformancePackRequest { + op := &aws.Operation{ + Name: opDeleteConformancePack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConformancePackInput{} + } + + req := c.newRequest(op, input, &DeleteConformancePackOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteConformancePackRequest{Request: req, Input: input, Copy: c.DeleteConformancePackRequest} +} + +// DeleteConformancePackRequest is the request type for the +// DeleteConformancePack API operation. 
+type DeleteConformancePackRequest struct { + *aws.Request + Input *DeleteConformancePackInput + Copy func(*DeleteConformancePackInput) DeleteConformancePackRequest +} + +// Send marshals and sends the DeleteConformancePack API request. +func (r DeleteConformancePackRequest) Send(ctx context.Context) (*DeleteConformancePackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteConformancePackResponse{ + DeleteConformancePackOutput: r.Request.Data.(*DeleteConformancePackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteConformancePackResponse is the response type for the +// DeleteConformancePack API operation. +type DeleteConformancePackResponse struct { + *DeleteConformancePackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteConformancePack request. +func (r *DeleteConformancePackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DeleteOrganizationConformancePack.go b/service/configservice/api_op_DeleteOrganizationConformancePack.go new file mode 100644 index 00000000000..6d7359b5302 --- /dev/null +++ b/service/configservice/api_op_DeleteOrganizationConformancePack.go @@ -0,0 +1,128 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +type DeleteOrganizationConformancePackInput struct { + _ struct{} `type:"structure"` + + // The name of organization conformance pack that you want to delete. + // + // OrganizationConformancePackName is a required field + OrganizationConformancePackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOrganizationConformancePackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteOrganizationConformancePackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteOrganizationConformancePackInput"} + + if s.OrganizationConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("OrganizationConformancePackName")) + } + if s.OrganizationConformancePackName != nil && len(*s.OrganizationConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationConformancePackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteOrganizationConformancePackOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOrganizationConformancePackOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteOrganizationConformancePack = "DeleteOrganizationConformancePack" + +// DeleteOrganizationConformancePackRequest returns a request value for making API operation for +// AWS Config. +// +// Deletes the specified organization conformance pack and all of the config +// rules and remediation actions from all member accounts in that organization. +// Only a master account can delete an organization conformance pack. +// +// AWS Config sets the state of a conformance pack to DELETE_IN_PROGRESS until +// the deletion is complete. 
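Editor's note: a one-call sketch of the delete operation added above; DeleteOrganizationConformancePackRequest follows the same shape with OrganizationConformancePackName instead.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// deleteConformancePack removes a single conformance pack by name. AWS Config
// keeps the pack in DELETE_IN_PROGRESS until the deletion finishes.
func deleteConformancePack(ctx context.Context, client *configservice.Client, name string) error {
	req := client.DeleteConformancePackRequest(&configservice.DeleteConformancePackInput{
		ConformancePackName: aws.String(name),
	})
	_, err := req.Send(ctx)
	return err
}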
You cannot update a conformance pack while it is +// in this state. +// +// // Example sending a request using DeleteOrganizationConformancePackRequest. +// req := client.DeleteOrganizationConformancePackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DeleteOrganizationConformancePack +func (c *Client) DeleteOrganizationConformancePackRequest(input *DeleteOrganizationConformancePackInput) DeleteOrganizationConformancePackRequest { + op := &aws.Operation{ + Name: opDeleteOrganizationConformancePack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOrganizationConformancePackInput{} + } + + req := c.newRequest(op, input, &DeleteOrganizationConformancePackOutput{}) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteOrganizationConformancePackRequest{Request: req, Input: input, Copy: c.DeleteOrganizationConformancePackRequest} +} + +// DeleteOrganizationConformancePackRequest is the request type for the +// DeleteOrganizationConformancePack API operation. +type DeleteOrganizationConformancePackRequest struct { + *aws.Request + Input *DeleteOrganizationConformancePackInput + Copy func(*DeleteOrganizationConformancePackInput) DeleteOrganizationConformancePackRequest +} + +// Send marshals and sends the DeleteOrganizationConformancePack API request. +func (r DeleteOrganizationConformancePackRequest) Send(ctx context.Context) (*DeleteOrganizationConformancePackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteOrganizationConformancePackResponse{ + DeleteOrganizationConformancePackOutput: r.Request.Data.(*DeleteOrganizationConformancePackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteOrganizationConformancePackResponse is the response type for the +// DeleteOrganizationConformancePack API operation. +type DeleteOrganizationConformancePackResponse struct { + *DeleteOrganizationConformancePackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteOrganizationConformancePack request. +func (r *DeleteOrganizationConformancePackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DescribeConformancePackCompliance.go b/service/configservice/api_op_DescribeConformancePackCompliance.go new file mode 100644 index 00000000000..f046bc5c1c1 --- /dev/null +++ b/service/configservice/api_op_DescribeConformancePackCompliance.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeConformancePackComplianceInput struct { + _ struct{} `type:"structure"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // A ConformancePackComplianceFilters object. + Filters *ConformancePackComplianceFilters `type:"structure"` + + // The maximum number of AWS Config rules within a conformance pack are returned + // on each page. 
+ Limit *int64 `type:"integer"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePackComplianceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConformancePackComplianceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeConformancePackComplianceInput"} + + if s.ConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConformancePackName")) + } + if s.ConformancePackName != nil && len(*s.ConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConformancePackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeConformancePackComplianceOutput struct { + _ struct{} `type:"structure"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // Returns a list of ConformancePackRuleCompliance objects. + // + // ConformancePackRuleComplianceList is a required field + ConformancePackRuleComplianceList []ConformancePackRuleCompliance `type:"list" required:"true"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePackComplianceOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeConformancePackCompliance = "DescribeConformancePackCompliance" + +// DescribeConformancePackComplianceRequest returns a request value for making API operation for +// AWS Config. +// +// Returns compliance information for each rule in that conformance pack. +// +// You must provide exact rule names otherwise AWS Config cannot return evaluation +// results due to insufficient data. +// +// // Example sending a request using DescribeConformancePackComplianceRequest. +// req := client.DescribeConformancePackComplianceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConformancePackCompliance +func (c *Client) DescribeConformancePackComplianceRequest(input *DescribeConformancePackComplianceInput) DescribeConformancePackComplianceRequest { + op := &aws.Operation{ + Name: opDescribeConformancePackCompliance, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConformancePackComplianceInput{} + } + + req := c.newRequest(op, input, &DescribeConformancePackComplianceOutput{}) + return DescribeConformancePackComplianceRequest{Request: req, Input: input, Copy: c.DescribeConformancePackComplianceRequest} +} + +// DescribeConformancePackComplianceRequest is the request type for the +// DescribeConformancePackCompliance API operation. +type DescribeConformancePackComplianceRequest struct { + *aws.Request + Input *DescribeConformancePackComplianceInput + Copy func(*DescribeConformancePackComplianceInput) DescribeConformancePackComplianceRequest +} + +// Send marshals and sends the DescribeConformancePackCompliance API request. 
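Editor's note: a sketch of reading per-rule compliance for one pack. The ConfigRuleName and ComplianceType fields on ConformancePackRuleCompliance are assumed from the service model rather than shown in this diff.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// printRuleCompliance lists the compliance state of every rule in a pack.
func printRuleCompliance(ctx context.Context, client *configservice.Client, packName string) error {
	req := client.DescribeConformancePackComplianceRequest(&configservice.DescribeConformancePackComplianceInput{
		ConformancePackName: aws.String(packName),
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	for _, rc := range resp.ConformancePackRuleComplianceList {
		// Field names on ConformancePackRuleCompliance are assumed from the model.
		if rc.ConfigRuleName != nil {
			fmt.Println(*rc.ConfigRuleName, rc.ComplianceType)
		}
	}
	return nil
}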
+func (r DescribeConformancePackComplianceRequest) Send(ctx context.Context) (*DescribeConformancePackComplianceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeConformancePackComplianceResponse{ + DescribeConformancePackComplianceOutput: r.Request.Data.(*DescribeConformancePackComplianceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeConformancePackComplianceResponse is the response type for the +// DescribeConformancePackCompliance API operation. +type DescribeConformancePackComplianceResponse struct { + *DescribeConformancePackComplianceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeConformancePackCompliance request. +func (r *DescribeConformancePackComplianceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DescribeConformancePackStatus.go b/service/configservice/api_op_DescribeConformancePackStatus.go new file mode 100644 index 00000000000..460e173b9bd --- /dev/null +++ b/service/configservice/api_op_DescribeConformancePackStatus.go @@ -0,0 +1,113 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeConformancePackStatusInput struct { + _ struct{} `type:"structure"` + + // Comma-separated list of conformance pack names. + ConformancePackNames []string `type:"list"` + + // The maximum number of conformance packs returned on each page. + Limit *int64 `type:"integer"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePackStatusInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeConformancePackStatusOutput struct { + _ struct{} `type:"structure"` + + // A list of ConformancePackStatusDetail objects. + ConformancePackStatusDetails []ConformancePackStatusDetail `type:"list"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePackStatusOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeConformancePackStatus = "DescribeConformancePackStatus" + +// DescribeConformancePackStatusRequest returns a request value for making API operation for +// AWS Config. +// +// Provides one or more conformance packs deployment status. +// +// // Example sending a request using DescribeConformancePackStatusRequest. 
+// req := client.DescribeConformancePackStatusRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConformancePackStatus +func (c *Client) DescribeConformancePackStatusRequest(input *DescribeConformancePackStatusInput) DescribeConformancePackStatusRequest { + op := &aws.Operation{ + Name: opDescribeConformancePackStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConformancePackStatusInput{} + } + + req := c.newRequest(op, input, &DescribeConformancePackStatusOutput{}) + return DescribeConformancePackStatusRequest{Request: req, Input: input, Copy: c.DescribeConformancePackStatusRequest} +} + +// DescribeConformancePackStatusRequest is the request type for the +// DescribeConformancePackStatus API operation. +type DescribeConformancePackStatusRequest struct { + *aws.Request + Input *DescribeConformancePackStatusInput + Copy func(*DescribeConformancePackStatusInput) DescribeConformancePackStatusRequest +} + +// Send marshals and sends the DescribeConformancePackStatus API request. +func (r DescribeConformancePackStatusRequest) Send(ctx context.Context) (*DescribeConformancePackStatusResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeConformancePackStatusResponse{ + DescribeConformancePackStatusOutput: r.Request.Data.(*DescribeConformancePackStatusOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeConformancePackStatusResponse is the response type for the +// DescribeConformancePackStatus API operation. +type DescribeConformancePackStatusResponse struct { + *DescribeConformancePackStatusOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeConformancePackStatus request. +func (r *DescribeConformancePackStatusResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DescribeConformancePacks.go b/service/configservice/api_op_DescribeConformancePacks.go new file mode 100644 index 00000000000..d9d24201f61 --- /dev/null +++ b/service/configservice/api_op_DescribeConformancePacks.go @@ -0,0 +1,115 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeConformancePacksInput struct { + _ struct{} `type:"structure"` + + // Comma-separated list of conformance pack names for which you want details. + // If you do not specify any names, AWS Config returns details for all your + // conformance packs. + ConformancePackNames []string `type:"list"` + + // The maximum number of conformance packs returned on each page. + Limit *int64 `type:"integer"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePacksInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeConformancePacksOutput struct { + _ struct{} `type:"structure"` + + // Returns a list of ConformancePackDetail objects. 
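Editor's note: a hedged sketch of checking deployment state against the ConformancePackState constants added earlier in this change; the ConformancePackState and ConformancePackName field names on ConformancePackStatusDetail are assumptions based on the service model.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// reportFailedPacks prints any conformance pack whose deployment failed.
func reportFailedPacks(ctx context.Context, client *configservice.Client, names []string) error {
	req := client.DescribeConformancePackStatusRequest(&configservice.DescribeConformancePackStatusInput{
		ConformancePackNames: names,
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	for _, d := range resp.ConformancePackStatusDetails {
		// Field names are assumed from the service model.
		if d.ConformancePackState == configservice.ConformancePackStateCreateFailed && d.ConformancePackName != nil {
			fmt.Println("deployment failed:", *d.ConformancePackName)
		}
	}
	return nil
}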
+ ConformancePackDetails []ConformancePackDetail `type:"list"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeConformancePacksOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeConformancePacks = "DescribeConformancePacks" + +// DescribeConformancePacksRequest returns a request value for making API operation for +// AWS Config. +// +// Returns a list of one or more conformance packs. +// +// // Example sending a request using DescribeConformancePacksRequest. +// req := client.DescribeConformancePacksRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConformancePacks +func (c *Client) DescribeConformancePacksRequest(input *DescribeConformancePacksInput) DescribeConformancePacksRequest { + op := &aws.Operation{ + Name: opDescribeConformancePacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConformancePacksInput{} + } + + req := c.newRequest(op, input, &DescribeConformancePacksOutput{}) + return DescribeConformancePacksRequest{Request: req, Input: input, Copy: c.DescribeConformancePacksRequest} +} + +// DescribeConformancePacksRequest is the request type for the +// DescribeConformancePacks API operation. +type DescribeConformancePacksRequest struct { + *aws.Request + Input *DescribeConformancePacksInput + Copy func(*DescribeConformancePacksInput) DescribeConformancePacksRequest +} + +// Send marshals and sends the DescribeConformancePacks API request. +func (r DescribeConformancePacksRequest) Send(ctx context.Context) (*DescribeConformancePacksResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeConformancePacksResponse{ + DescribeConformancePacksOutput: r.Request.Data.(*DescribeConformancePacksOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeConformancePacksResponse is the response type for the +// DescribeConformancePacks API operation. +type DescribeConformancePacksResponse struct { + *DescribeConformancePacksOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeConformancePacks request. +func (r *DescribeConformancePacksResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DescribeOrganizationConfigRules.go b/service/configservice/api_op_DescribeOrganizationConfigRules.go index 4847f99776a..7449b39366c 100644 --- a/service/configservice/api_op_DescribeOrganizationConfigRules.go +++ b/service/configservice/api_op_DescribeOrganizationConfigRules.go @@ -38,7 +38,7 @@ type DescribeOrganizationConfigRulesOutput struct { // next page of results in a paginated response. NextToken *string `type:"string"` - // Retuns a list OrganizationConfigRule objects. + // Returns a list of OrganizationConfigRule objects. 
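Editor's note: because DescribeConformancePacks returns a NextToken, a small sketch of walking all pages to collect pack names; the ConformancePackName field on ConformancePackDetail is assumed from the service model.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// listAllConformancePacks follows NextToken until every page has been read.
func listAllConformancePacks(ctx context.Context, client *configservice.Client) ([]string, error) {
	var names []string
	input := &configservice.DescribeConformancePacksInput{Limit: aws.Int64(20)}
	for {
		resp, err := client.DescribeConformancePacksRequest(input).Send(ctx)
		if err != nil {
			return nil, err
		}
		for _, d := range resp.ConformancePackDetails {
			// ConformancePackName is an assumed field name.
			if d.ConformancePackName != nil {
				names = append(names, *d.ConformancePackName)
			}
		}
		if resp.NextToken == nil {
			break
		}
		input.NextToken = resp.NextToken
	}
	return names, nil
}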
OrganizationConfigRules []OrganizationConfigRule `type:"list"` } diff --git a/service/configservice/api_op_DescribeOrganizationConformancePackStatuses.go b/service/configservice/api_op_DescribeOrganizationConformancePackStatuses.go new file mode 100644 index 00000000000..14bd1fb5ef1 --- /dev/null +++ b/service/configservice/api_op_DescribeOrganizationConformancePackStatuses.go @@ -0,0 +1,128 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeOrganizationConformancePackStatusesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of OrganizationConformancePackStatuses returned on each + // page. If you do no specify a number, AWS Config uses the default. The default + // is 100. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The names of organization conformance packs for which you want status details. + // If you do not specify any names, AWS Config returns details for all your + // organization conformance packs. + OrganizationConformancePackNames []string `type:"list"` +} + +// String returns the string representation +func (s DescribeOrganizationConformancePackStatusesInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeOrganizationConformancePackStatusesOutput struct { + _ struct{} `type:"structure"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // A list of OrganizationConformancePackStatus objects. + OrganizationConformancePackStatuses []OrganizationConformancePackStatus `type:"list"` +} + +// String returns the string representation +func (s DescribeOrganizationConformancePackStatusesOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeOrganizationConformancePackStatuses = "DescribeOrganizationConformancePackStatuses" + +// DescribeOrganizationConformancePackStatusesRequest returns a request value for making API operation for +// AWS Config. +// +// Provides organization conformance pack deployment status for an organization. +// +// The status is not considered successful until organization conformance pack +// is successfully deployed in all the member accounts with an exception of +// excluded accounts. +// +// When you specify the limit and the next token, you receive a paginated response. +// Limit and next token are not applicable if you specify organization conformance +// pack names. They are only applicable, when you request all the organization +// conformance packs. +// +// Only a master account can call this API. +// +// // Example sending a request using DescribeOrganizationConformancePackStatusesRequest. 
+// req := client.DescribeOrganizationConformancePackStatusesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConformancePackStatuses +func (c *Client) DescribeOrganizationConformancePackStatusesRequest(input *DescribeOrganizationConformancePackStatusesInput) DescribeOrganizationConformancePackStatusesRequest { + op := &aws.Operation{ + Name: opDescribeOrganizationConformancePackStatuses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeOrganizationConformancePackStatusesInput{} + } + + req := c.newRequest(op, input, &DescribeOrganizationConformancePackStatusesOutput{}) + return DescribeOrganizationConformancePackStatusesRequest{Request: req, Input: input, Copy: c.DescribeOrganizationConformancePackStatusesRequest} +} + +// DescribeOrganizationConformancePackStatusesRequest is the request type for the +// DescribeOrganizationConformancePackStatuses API operation. +type DescribeOrganizationConformancePackStatusesRequest struct { + *aws.Request + Input *DescribeOrganizationConformancePackStatusesInput + Copy func(*DescribeOrganizationConformancePackStatusesInput) DescribeOrganizationConformancePackStatusesRequest +} + +// Send marshals and sends the DescribeOrganizationConformancePackStatuses API request. +func (r DescribeOrganizationConformancePackStatusesRequest) Send(ctx context.Context) (*DescribeOrganizationConformancePackStatusesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeOrganizationConformancePackStatusesResponse{ + DescribeOrganizationConformancePackStatusesOutput: r.Request.Data.(*DescribeOrganizationConformancePackStatusesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeOrganizationConformancePackStatusesResponse is the response type for the +// DescribeOrganizationConformancePackStatuses API operation. +type DescribeOrganizationConformancePackStatusesResponse struct { + *DescribeOrganizationConformancePackStatusesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeOrganizationConformancePackStatuses request. +func (r *DescribeOrganizationConformancePackStatusesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_DescribeOrganizationConformancePacks.go b/service/configservice/api_op_DescribeOrganizationConformancePacks.go new file mode 100644 index 00000000000..86d799070d0 --- /dev/null +++ b/service/configservice/api_op_DescribeOrganizationConformancePacks.go @@ -0,0 +1,119 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeOrganizationConformancePacksInput struct { + _ struct{} `type:"structure"` + + // The maximum number of organization config packs returned on each page. If + // you do no specify a number, AWS Config uses the default. The default is 100. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The name that you assign to an organization conformance pack. 
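Editor's note: called from the organization master account, a sketch of flagging member deployments that failed; the Status and OrganizationConformancePackName fields on OrganizationConformancePackStatus are assumed from the service model.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// reportFailedOrgPacks prints organization conformance packs whose deployment failed.
func reportFailedOrgPacks(ctx context.Context, client *configservice.Client) error {
	req := client.DescribeOrganizationConformancePackStatusesRequest(&configservice.DescribeOrganizationConformancePackStatusesInput{})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	for _, s := range resp.OrganizationConformancePackStatuses {
		// Field names are assumed from the service model; Status is taken to
		// carry the OrganizationResourceStatus enum added in this change.
		if s.Status == configservice.OrganizationResourceStatusCreateFailed && s.OrganizationConformancePackName != nil {
			fmt.Println("deployment failed:", *s.OrganizationConformancePackName)
		}
	}
	return nil
}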
+ OrganizationConformancePackNames []string `type:"list"` +} + +// String returns the string representation +func (s DescribeOrganizationConformancePacksInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeOrganizationConformancePacksOutput struct { + _ struct{} `type:"structure"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // Returns a list of OrganizationConformancePacks objects. + OrganizationConformancePacks []OrganizationConformancePack `type:"list"` +} + +// String returns the string representation +func (s DescribeOrganizationConformancePacksOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeOrganizationConformancePacks = "DescribeOrganizationConformancePacks" + +// DescribeOrganizationConformancePacksRequest returns a request value for making API operation for +// AWS Config. +// +// Returns a list of organization conformance packs. +// +// When you specify the limit and the next token, you receive a paginated response. +// Limit and next token are not applicable if you specify organization conformance +// packs names. They are only applicable, when you request all the organization +// conformance packs. Only a master account can call this API. +// +// // Example sending a request using DescribeOrganizationConformancePacksRequest. +// req := client.DescribeOrganizationConformancePacksRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeOrganizationConformancePacks +func (c *Client) DescribeOrganizationConformancePacksRequest(input *DescribeOrganizationConformancePacksInput) DescribeOrganizationConformancePacksRequest { + op := &aws.Operation{ + Name: opDescribeOrganizationConformancePacks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeOrganizationConformancePacksInput{} + } + + req := c.newRequest(op, input, &DescribeOrganizationConformancePacksOutput{}) + return DescribeOrganizationConformancePacksRequest{Request: req, Input: input, Copy: c.DescribeOrganizationConformancePacksRequest} +} + +// DescribeOrganizationConformancePacksRequest is the request type for the +// DescribeOrganizationConformancePacks API operation. +type DescribeOrganizationConformancePacksRequest struct { + *aws.Request + Input *DescribeOrganizationConformancePacksInput + Copy func(*DescribeOrganizationConformancePacksInput) DescribeOrganizationConformancePacksRequest +} + +// Send marshals and sends the DescribeOrganizationConformancePacks API request. +func (r DescribeOrganizationConformancePacksRequest) Send(ctx context.Context) (*DescribeOrganizationConformancePacksResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeOrganizationConformancePacksResponse{ + DescribeOrganizationConformancePacksOutput: r.Request.Data.(*DescribeOrganizationConformancePacksOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeOrganizationConformancePacksResponse is the response type for the +// DescribeOrganizationConformancePacks API operation. 
+type DescribeOrganizationConformancePacksResponse struct { + *DescribeOrganizationConformancePacksOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeOrganizationConformancePacks request. +func (r *DescribeOrganizationConformancePacksResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_GetConformancePackComplianceDetails.go b/service/configservice/api_op_GetConformancePackComplianceDetails.go new file mode 100644 index 00000000000..df5271014c9 --- /dev/null +++ b/service/configservice/api_op_GetConformancePackComplianceDetails.go @@ -0,0 +1,147 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetConformancePackComplianceDetailsInput struct { + _ struct{} `type:"structure"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // A ConformancePackEvaluationFilters object. + Filters *ConformancePackEvaluationFilters `type:"structure"` + + // The maximum number of evaluation results returned on each page. If you do + // no specify a number, AWS Config uses the default. The default is 100. + Limit *int64 `type:"integer"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetConformancePackComplianceDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConformancePackComplianceDetailsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetConformancePackComplianceDetailsInput"} + + if s.ConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConformancePackName")) + } + if s.ConformancePackName != nil && len(*s.ConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConformancePackName", 1)) + } + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetConformancePackComplianceDetailsOutput struct { + _ struct{} `type:"structure"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // Returns a list of ConformancePackEvaluationResult objects. + ConformancePackRuleEvaluationResults []ConformancePackEvaluationResult `type:"list"` + + // The nextToken string returned in a previous request that you use to request + // the next page of results in a paginated response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetConformancePackComplianceDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetConformancePackComplianceDetails = "GetConformancePackComplianceDetails" + +// GetConformancePackComplianceDetailsRequest returns a request value for making API operation for +// AWS Config. +// +// Returns compliance details of a conformance pack for all AWS resources that +// are monitered by conformance pack. 
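Editor's note: a sketch of pulling only non-compliant evaluations for one pack. The ComplianceType filter field on ConformancePackEvaluationFilters is an assumption from the service model, not shown in this diff; the pack name is supplied by the caller.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// countNonCompliantEvaluations returns how many resource evaluations in the
// pack are NON_COMPLIANT on the first page of results.
func countNonCompliantEvaluations(ctx context.Context, client *configservice.Client, packName string) (int, error) {
	req := client.GetConformancePackComplianceDetailsRequest(&configservice.GetConformancePackComplianceDetailsInput{
		ConformancePackName: aws.String(packName),
		Filters: &configservice.ConformancePackEvaluationFilters{
			// ComplianceType is an assumed filter field name.
			ComplianceType: configservice.ConformancePackComplianceTypeNonCompliant,
		},
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return 0, err
	}
	return len(resp.ConformancePackRuleEvaluationResults), nil
}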
+// +// // Example sending a request using GetConformancePackComplianceDetailsRequest. +// req := client.GetConformancePackComplianceDetailsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetConformancePackComplianceDetails +func (c *Client) GetConformancePackComplianceDetailsRequest(input *GetConformancePackComplianceDetailsInput) GetConformancePackComplianceDetailsRequest { + op := &aws.Operation{ + Name: opGetConformancePackComplianceDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConformancePackComplianceDetailsInput{} + } + + req := c.newRequest(op, input, &GetConformancePackComplianceDetailsOutput{}) + return GetConformancePackComplianceDetailsRequest{Request: req, Input: input, Copy: c.GetConformancePackComplianceDetailsRequest} +} + +// GetConformancePackComplianceDetailsRequest is the request type for the +// GetConformancePackComplianceDetails API operation. +type GetConformancePackComplianceDetailsRequest struct { + *aws.Request + Input *GetConformancePackComplianceDetailsInput + Copy func(*GetConformancePackComplianceDetailsInput) GetConformancePackComplianceDetailsRequest +} + +// Send marshals and sends the GetConformancePackComplianceDetails API request. +func (r GetConformancePackComplianceDetailsRequest) Send(ctx context.Context) (*GetConformancePackComplianceDetailsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetConformancePackComplianceDetailsResponse{ + GetConformancePackComplianceDetailsOutput: r.Request.Data.(*GetConformancePackComplianceDetailsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetConformancePackComplianceDetailsResponse is the response type for the +// GetConformancePackComplianceDetails API operation. +type GetConformancePackComplianceDetailsResponse struct { + *GetConformancePackComplianceDetailsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetConformancePackComplianceDetails request. +func (r *GetConformancePackComplianceDetailsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_GetConformancePackComplianceSummary.go b/service/configservice/api_op_GetConformancePackComplianceSummary.go new file mode 100644 index 00000000000..495cef0ee65 --- /dev/null +++ b/service/configservice/api_op_GetConformancePackComplianceSummary.go @@ -0,0 +1,122 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetConformancePackComplianceSummaryInput struct { + _ struct{} `type:"structure"` + + // ConformancePackNames is a required field + ConformancePackNames []string `min:"1" type:"list" required:"true"` + + Limit *int64 `type:"integer"` + + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetConformancePackComplianceSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetConformancePackComplianceSummaryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetConformancePackComplianceSummaryInput"} + + if s.ConformancePackNames == nil { + invalidParams.Add(aws.NewErrParamRequired("ConformancePackNames")) + } + if s.ConformancePackNames != nil && len(s.ConformancePackNames) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConformancePackNames", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetConformancePackComplianceSummaryOutput struct { + _ struct{} `type:"structure"` + + ConformancePackComplianceSummaryList []ConformancePackComplianceSummary `min:"1" type:"list"` + + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetConformancePackComplianceSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetConformancePackComplianceSummary = "GetConformancePackComplianceSummary" + +// GetConformancePackComplianceSummaryRequest returns a request value for making API operation for +// AWS Config. +// +// // Example sending a request using GetConformancePackComplianceSummaryRequest. +// req := client.GetConformancePackComplianceSummaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetConformancePackComplianceSummary +func (c *Client) GetConformancePackComplianceSummaryRequest(input *GetConformancePackComplianceSummaryInput) GetConformancePackComplianceSummaryRequest { + op := &aws.Operation{ + Name: opGetConformancePackComplianceSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConformancePackComplianceSummaryInput{} + } + + req := c.newRequest(op, input, &GetConformancePackComplianceSummaryOutput{}) + return GetConformancePackComplianceSummaryRequest{Request: req, Input: input, Copy: c.GetConformancePackComplianceSummaryRequest} +} + +// GetConformancePackComplianceSummaryRequest is the request type for the +// GetConformancePackComplianceSummary API operation. +type GetConformancePackComplianceSummaryRequest struct { + *aws.Request + Input *GetConformancePackComplianceSummaryInput + Copy func(*GetConformancePackComplianceSummaryInput) GetConformancePackComplianceSummaryRequest +} + +// Send marshals and sends the GetConformancePackComplianceSummary API request. +func (r GetConformancePackComplianceSummaryRequest) Send(ctx context.Context) (*GetConformancePackComplianceSummaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetConformancePackComplianceSummaryResponse{ + GetConformancePackComplianceSummaryOutput: r.Request.Data.(*GetConformancePackComplianceSummaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetConformancePackComplianceSummaryResponse is the response type for the +// GetConformancePackComplianceSummary API operation. +type GetConformancePackComplianceSummaryResponse struct { + *GetConformancePackComplianceSummaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetConformancePackComplianceSummary request. 
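Editor's note: the summary operation carries no doc text in the model yet. As a hedged sketch, it maps each requested pack to an overall compliance status; the ConformancePackName and ConformancePackComplianceStatus field names on ConformancePackComplianceSummary are assumed from the service model.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/configservice"
)

// printPackSummaries prints the overall compliance status of each named pack.
func printPackSummaries(ctx context.Context, client *configservice.Client, names []string) error {
	req := client.GetConformancePackComplianceSummaryRequest(&configservice.GetConformancePackComplianceSummaryInput{
		ConformancePackNames: names,
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	for _, s := range resp.ConformancePackComplianceSummaryList {
		// Field names are assumed from the service model.
		if s.ConformancePackName != nil {
			fmt.Println(*s.ConformancePackName, s.ConformancePackComplianceStatus)
		}
	}
	return nil
}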
+func (r *GetConformancePackComplianceSummaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_GetOrganizationConformancePackDetailedStatus.go b/service/configservice/api_op_GetOrganizationConformancePackDetailedStatus.go new file mode 100644 index 00000000000..4f5af063a6f --- /dev/null +++ b/service/configservice/api_op_GetOrganizationConformancePackDetailedStatus.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetOrganizationConformancePackDetailedStatusInput struct { + _ struct{} `type:"structure"` + + // An OrganizationResourceDetailedStatusFilters object. + Filters *OrganizationResourceDetailedStatusFilters `type:"structure"` + + // The maximum number of OrganizationConformancePackDetailedStatuses returned + // on each page. If you do not specify a number, AWS Config uses the default. + // The default is 100. + Limit *int64 `type:"integer"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // The name of organization conformance pack for which you want status details + // for member accounts. + // + // OrganizationConformancePackName is a required field + OrganizationConformancePackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOrganizationConformancePackDetailedStatusInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOrganizationConformancePackDetailedStatusInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetOrganizationConformancePackDetailedStatusInput"} + + if s.OrganizationConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("OrganizationConformancePackName")) + } + if s.OrganizationConformancePackName != nil && len(*s.OrganizationConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationConformancePackName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetOrganizationConformancePackDetailedStatusOutput struct { + _ struct{} `type:"structure"` + + // The nextToken string returned on a previous page that you use to get the + // next page of results in a paginated response. + NextToken *string `type:"string"` + + // A list of OrganizationConformancePackDetailedStatus objects. + OrganizationConformancePackDetailedStatuses []OrganizationConformancePackDetailedStatus `type:"list"` +} + +// String returns the string representation +func (s GetOrganizationConformancePackDetailedStatusOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetOrganizationConformancePackDetailedStatus = "GetOrganizationConformancePackDetailedStatus" + +// GetOrganizationConformancePackDetailedStatusRequest returns a request value for making API operation for +// AWS Config. +// +// Returns detailed status for each member account within an organization for +// a given organization conformance pack. +// +// Only a master account can call this API. +// +// // Example sending a request using GetOrganizationConformancePackDetailedStatusRequest. 
+// req := client.GetOrganizationConformancePackDetailedStatusRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/GetOrganizationConformancePackDetailedStatus +func (c *Client) GetOrganizationConformancePackDetailedStatusRequest(input *GetOrganizationConformancePackDetailedStatusInput) GetOrganizationConformancePackDetailedStatusRequest { + op := &aws.Operation{ + Name: opGetOrganizationConformancePackDetailedStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOrganizationConformancePackDetailedStatusInput{} + } + + req := c.newRequest(op, input, &GetOrganizationConformancePackDetailedStatusOutput{}) + return GetOrganizationConformancePackDetailedStatusRequest{Request: req, Input: input, Copy: c.GetOrganizationConformancePackDetailedStatusRequest} +} + +// GetOrganizationConformancePackDetailedStatusRequest is the request type for the +// GetOrganizationConformancePackDetailedStatus API operation. +type GetOrganizationConformancePackDetailedStatusRequest struct { + *aws.Request + Input *GetOrganizationConformancePackDetailedStatusInput + Copy func(*GetOrganizationConformancePackDetailedStatusInput) GetOrganizationConformancePackDetailedStatusRequest +} + +// Send marshals and sends the GetOrganizationConformancePackDetailedStatus API request. +func (r GetOrganizationConformancePackDetailedStatusRequest) Send(ctx context.Context) (*GetOrganizationConformancePackDetailedStatusResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetOrganizationConformancePackDetailedStatusResponse{ + GetOrganizationConformancePackDetailedStatusOutput: r.Request.Data.(*GetOrganizationConformancePackDetailedStatusOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetOrganizationConformancePackDetailedStatusResponse is the response type for the +// GetOrganizationConformancePackDetailedStatus API operation. +type GetOrganizationConformancePackDetailedStatusResponse struct { + *GetOrganizationConformancePackDetailedStatusOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetOrganizationConformancePackDetailedStatus request. +func (r *GetOrganizationConformancePackDetailedStatusResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_PutConformancePack.go b/service/configservice/api_op_PutConformancePack.go new file mode 100644 index 00000000000..a6fb7505e14 --- /dev/null +++ b/service/configservice/api_op_PutConformancePack.go @@ -0,0 +1,182 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type PutConformancePackInput struct { + _ struct{} `type:"structure"` + + // A list of ConformancePackInputParameter objects. + ConformancePackInputParameters []ConformancePackInputParameter `type:"list"` + + // Name of the conformance pack you want to create. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. + // AWS Config stores intermediate files while processing conformance pack template. 
+ // + // DeliveryS3Bucket is a required field + DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + + // The prefix for the Amazon S3 bucket. + DeliveryS3KeyPrefix *string `min:"1" type:"string"` + + // A string containing full conformance pack template body. Structure containing + // the template body with a minimum length of 1 byte and a maximum length of + // 51,200 bytes. + // + // You can only use a YAML template with one resource type, that is, config + // rule. + TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The uri must point to the + // conformance pack template (max size: 300,000 bytes) that is located in an + // Amazon S3 bucket in the same region as the conformance pack. + // + // You must have access to read Amazon S3 bucket. + TemplateS3Uri *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutConformancePackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConformancePackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutConformancePackInput"} + + if s.ConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConformancePackName")) + } + if s.ConformancePackName != nil && len(*s.ConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConformancePackName", 1)) + } + + if s.DeliveryS3Bucket == nil { + invalidParams.Add(aws.NewErrParamRequired("DeliveryS3Bucket")) + } + if s.DeliveryS3Bucket != nil && len(*s.DeliveryS3Bucket) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DeliveryS3Bucket", 3)) + } + if s.DeliveryS3KeyPrefix != nil && len(*s.DeliveryS3KeyPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DeliveryS3KeyPrefix", 1)) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateS3Uri != nil && len(*s.TemplateS3Uri) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateS3Uri", 1)) + } + if s.ConformancePackInputParameters != nil { + for i, v := range s.ConformancePackInputParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ConformancePackInputParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutConformancePackOutput struct { + _ struct{} `type:"structure"` + + // ARN of the conformance pack. + ConformancePackArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutConformancePackOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutConformancePack = "PutConformancePack" + +// PutConformancePackRequest returns a request value for making API operation for +// AWS Config. +// +// Creates or updates a conformance pack. A conformance pack is a collection +// of AWS Config rules that can be easily deployed in an account and a region. +// +// This API creates a service linked role AWSServiceRoleForConfigConforms in +// your account. The service linked role is created only when the role does +// not exist in your account. AWS Config verifies the existence of role with +// GetRole action. +// +// You must specify either the TemplateS3Uri or the TemplateBody parameter, +// but not both. If you provide both AWS Config uses the TemplateS3Uri parameter +// and ignores the TemplateBody parameter. 
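// A slightly fuller sketch than the generated example below (editor's
// illustration, not part of the generated code; the pack name, bucket, and
// template URI are hypothetical and the usual imports are assumed):
//
//    func deployConformancePack(cfg aws.Config) error {
//        svc := configservice.New(cfg)
//        req := svc.PutConformancePackRequest(&configservice.PutConformancePackInput{
//            ConformancePackName: aws.String("my-conformance-pack"),
//            DeliveryS3Bucket:    aws.String("my-config-delivery-bucket"),
//            // TemplateS3Uri and TemplateBody are alternatives; TemplateS3Uri wins if both are set.
//            TemplateS3Uri: aws.String("s3://my-config-templates/pack.yaml"),
//        })
//        resp, err := req.Send(context.TODO())
//        if err != nil {
//            return err
//        }
//        fmt.Println(resp)
//        return nil
//    }
//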
+// +// // Example sending a request using PutConformancePackRequest. +// req := client.PutConformancePackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutConformancePack +func (c *Client) PutConformancePackRequest(input *PutConformancePackInput) PutConformancePackRequest { + op := &aws.Operation{ + Name: opPutConformancePack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutConformancePackInput{} + } + + req := c.newRequest(op, input, &PutConformancePackOutput{}) + return PutConformancePackRequest{Request: req, Input: input, Copy: c.PutConformancePackRequest} +} + +// PutConformancePackRequest is the request type for the +// PutConformancePack API operation. +type PutConformancePackRequest struct { + *aws.Request + Input *PutConformancePackInput + Copy func(*PutConformancePackInput) PutConformancePackRequest +} + +// Send marshals and sends the PutConformancePack API request. +func (r PutConformancePackRequest) Send(ctx context.Context) (*PutConformancePackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutConformancePackResponse{ + PutConformancePackOutput: r.Request.Data.(*PutConformancePackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutConformancePackResponse is the response type for the +// PutConformancePack API operation. +type PutConformancePackResponse struct { + *PutConformancePackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutConformancePack request. +func (r *PutConformancePackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_op_PutOrganizationConformancePack.go b/service/configservice/api_op_PutOrganizationConformancePack.go new file mode 100644 index 00000000000..11be3cbee9c --- /dev/null +++ b/service/configservice/api_op_PutOrganizationConformancePack.go @@ -0,0 +1,184 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package configservice + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type PutOrganizationConformancePackInput struct { + _ struct{} `type:"structure"` + + // A list of ConformancePackInputParameter objects. + ConformancePackInputParameters []ConformancePackInputParameter `type:"list"` + + // Location of an Amazon S3 bucket where AWS Config can deliver evaluation results. + // AWS Config stores intermediate files while processing conformance pack template. + // + // DeliveryS3Bucket is a required field + DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + + // The prefix for the Amazon S3 bucket. + DeliveryS3KeyPrefix *string `min:"1" type:"string"` + + // A list of AWS accounts to be excluded from an organization conformance pack + // while deploying a conformance pack. + ExcludedAccounts []string `type:"list"` + + // Name of the organization conformance pack you want to create. + // + // OrganizationConformancePackName is a required field + OrganizationConformancePackName *string `min:"1" type:"string" required:"true"` + + // A string containing full conformance pack template body. Structure containing + // the template body with a minimum length of 1 byte and a maximum length of + // 51,200 bytes. 
+ TemplateBody *string `min:"1" type:"string"` + + // Location of file containing the template body. The uri must point to the + // conformance pack template (max size: 300,000 bytes). + // + // You must have access to read Amazon S3 bucket. + TemplateS3Uri *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutOrganizationConformancePackInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutOrganizationConformancePackInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutOrganizationConformancePackInput"} + + if s.DeliveryS3Bucket == nil { + invalidParams.Add(aws.NewErrParamRequired("DeliveryS3Bucket")) + } + if s.DeliveryS3Bucket != nil && len(*s.DeliveryS3Bucket) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("DeliveryS3Bucket", 3)) + } + if s.DeliveryS3KeyPrefix != nil && len(*s.DeliveryS3KeyPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DeliveryS3KeyPrefix", 1)) + } + + if s.OrganizationConformancePackName == nil { + invalidParams.Add(aws.NewErrParamRequired("OrganizationConformancePackName")) + } + if s.OrganizationConformancePackName != nil && len(*s.OrganizationConformancePackName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationConformancePackName", 1)) + } + if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateBody", 1)) + } + if s.TemplateS3Uri != nil && len(*s.TemplateS3Uri) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateS3Uri", 1)) + } + if s.ConformancePackInputParameters != nil { + for i, v := range s.ConformancePackInputParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ConformancePackInputParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutOrganizationConformancePackOutput struct { + _ struct{} `type:"structure"` + + // ARN of the organization conformance pack. + OrganizationConformancePackArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutOrganizationConformancePackOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutOrganizationConformancePack = "PutOrganizationConformancePack" + +// PutOrganizationConformancePackRequest returns a request value for making API operation for +// AWS Config. +// +// Deploys conformance packs across member accounts in an AWS Organization. +// +// This API enables organization service access through the EnableAWSServiceAccess +// action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup +// in the master account of your organization. The service linked role is created +// only when the role does not exist in the master account. AWS Config verifies +// the existence of role with GetRole action. +// +// The SPN is config-multiaccountsetup.amazonaws.com. +// +// You must specify either the TemplateS3Uri or the TemplateBody parameter, +// but not both. If you provide both AWS Config uses the TemplateS3Uri parameter +// and ignores the TemplateBody parameter. +// +// // Example sending a request using PutOrganizationConformancePackRequest. 
+// req := client.PutOrganizationConformancePackRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutOrganizationConformancePack +func (c *Client) PutOrganizationConformancePackRequest(input *PutOrganizationConformancePackInput) PutOrganizationConformancePackRequest { + op := &aws.Operation{ + Name: opPutOrganizationConformancePack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutOrganizationConformancePackInput{} + } + + req := c.newRequest(op, input, &PutOrganizationConformancePackOutput{}) + return PutOrganizationConformancePackRequest{Request: req, Input: input, Copy: c.PutOrganizationConformancePackRequest} +} + +// PutOrganizationConformancePackRequest is the request type for the +// PutOrganizationConformancePack API operation. +type PutOrganizationConformancePackRequest struct { + *aws.Request + Input *PutOrganizationConformancePackInput + Copy func(*PutOrganizationConformancePackInput) PutOrganizationConformancePackRequest +} + +// Send marshals and sends the PutOrganizationConformancePack API request. +func (r PutOrganizationConformancePackRequest) Send(ctx context.Context) (*PutOrganizationConformancePackResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutOrganizationConformancePackResponse{ + PutOrganizationConformancePackOutput: r.Request.Data.(*PutOrganizationConformancePackOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutOrganizationConformancePackResponse is the response type for the +// PutOrganizationConformancePack API operation. +type PutOrganizationConformancePackResponse struct { + *PutOrganizationConformancePackOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutOrganizationConformancePack request. +func (r *PutOrganizationConformancePackResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/configservice/api_types.go b/service/configservice/api_types.go index c5b69c16f37..43461837dd9 100644 --- a/service/configservice/api_types.go +++ b/service/configservice/api_types.go @@ -982,6 +982,276 @@ func (s ConfigurationRecorderStatus) String() string { return awsutil.Prettify(s) } +// Filters the conformance pack by compliance types and AWS Config rule names. +type ConformancePackComplianceFilters struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT and NON_COMPLIANT. + ComplianceType ConformancePackComplianceType `type:"string" enum:"true"` + + // Filters the results by AWS Config rule names. + ConfigRuleNames []string `type:"list"` +} + +// String returns the string representation +func (s ConformancePackComplianceFilters) String() string { + return awsutil.Prettify(s) +} + +type ConformancePackComplianceSummary struct { + _ struct{} `type:"structure"` + + // ConformancePackComplianceStatus is a required field + ConformancePackComplianceStatus ConformancePackComplianceType `type:"string" required:"true" enum:"true"` + + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConformancePackComplianceSummary) String() string { + return awsutil.Prettify(s) +} + +// Returns details of a conformance pack. 
A conformance pack is a collection +// of AWS Config rules that can be easily deployed in an account and a region. +type ConformancePackDetail struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the conformance pack. + // + // ConformancePackArn is a required field + ConformancePackArn *string `min:"1" type:"string" required:"true"` + + // ID of the conformance pack. + // + // ConformancePackId is a required field + ConformancePackId *string `min:"1" type:"string" required:"true"` + + // A list of ConformancePackInputParameter objects. + ConformancePackInputParameters []ConformancePackInputParameter `type:"list"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + CreatedBy *string `min:"1" type:"string"` + + // Location of an Amazon S3 bucket where AWS Config can deliver evaluation results + // and conformance pack template that is used to create a pack. + // + // DeliveryS3Bucket is a required field + DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + + // Any folder structure you want to add to an Amazon S3 bucket. + DeliveryS3KeyPrefix *string `min:"1" type:"string"` + + // Last time when conformation pack update was requested. + LastUpdateRequestedTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ConformancePackDetail) String() string { + return awsutil.Prettify(s) +} + +// Filters a conformance pack by AWS Config rule names, compliance types, AWS +// resource types, and resource IDs. +type ConformancePackEvaluationFilters struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT and NON_COMPLIANT. + ComplianceType ConformancePackComplianceType `type:"string" enum:"true"` + + // Filters the results by AWS Config rule names. + ConfigRuleNames []string `type:"list"` + + // Filters the results by resource IDs. + ResourceIds []string `type:"list"` + + // Filters the results by the resource type (for example, "AWS::EC2::Instance"). + ResourceType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ConformancePackEvaluationFilters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConformancePackEvaluationFilters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ConformancePackEvaluationFilters"} + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The details of a conformance pack evaluation. Provides AWS Config rule and +// AWS resource type that was evaluated, the compliance of the conformance pack, +// related time stamps, and supplementary information. +type ConformancePackEvaluationResult struct { + _ struct{} `type:"structure"` + + // Supplementary information about how the evaluation determined the compliance. + Annotation *string `type:"string"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT and NON_COMPLIANT. + // + // ComplianceType is a required field + ComplianceType ConformancePackComplianceType `type:"string" required:"true" enum:"true"` + + // The time when AWS Config rule evaluated AWS resource. 
+ // + // ConfigRuleInvokedTime is a required field + ConfigRuleInvokedTime *time.Time `type:"timestamp" required:"true"` + + // Uniquely identifies an evaluation result. + // + // EvaluationResultIdentifier is a required field + EvaluationResultIdentifier *EvaluationResultIdentifier `type:"structure" required:"true"` + + // The time when AWS Config recorded the evaluation result. + // + // ResultRecordedTime is a required field + ResultRecordedTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s ConformancePackEvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// Input parameters in the form of key-value pairs for the conformance pack, +// both of which you define. Keys can have a maximum character length of 128 +// characters, and values can have a maximum length of 256 characters. +type ConformancePackInputParameter struct { + _ struct{} `type:"structure"` + + // One part of a key-value pair. + // + // ParameterName is a required field + ParameterName *string `type:"string" required:"true"` + + // Another part of the key-value pair. + // + // ParameterValue is a required field + ParameterValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConformancePackInputParameter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConformancePackInputParameter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ConformancePackInputParameter"} + + if s.ParameterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ParameterName")) + } + + if s.ParameterValue == nil { + invalidParams.Add(aws.NewErrParamRequired("ParameterValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Compliance information of one or more AWS Config rules within a conformance +// pack. You can filter using AWS Config rule names and compliance types. +type ConformancePackRuleCompliance struct { + _ struct{} `type:"structure"` + + // Filters the results by compliance. + // + // The allowed values are COMPLIANT and NON_COMPLIANT. + ComplianceType ConformancePackComplianceType `type:"string" enum:"true"` + + // Filters the results by AWS Config rule name. + ConfigRuleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ConformancePackRuleCompliance) String() string { + return awsutil.Prettify(s) +} + +// Status details of a conformance pack. +type ConformancePackStatusDetail struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of comformance pack. + // + // ConformancePackArn is a required field + ConformancePackArn *string `min:"1" type:"string" required:"true"` + + // ID of the conformance pack. + // + // ConformancePackId is a required field + ConformancePackId *string `min:"1" type:"string" required:"true"` + + // Name of the conformance pack. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // Indicates deployment status of conformance pack. + // + // AWS Config sets the state of the conformance pack to: + // + // * CREATE_IN_PROGRESS when a conformance pack creation is in progress for + // an account. + // + // * CREATE_COMPLETE when a conformance pack has been successfully created + // in your account. + // + // * CREATE_FAILED when a conformance pack creation failed in your account. 
+ // + // * DELETE_IN_PROGRESS when a conformance pack deletion is in progress. + // + // * DELETE_FAILED when a conformance pack deletion failed from your account. + // + // ConformancePackState is a required field + ConformancePackState ConformancePackState `type:"string" required:"true" enum:"true"` + + // The reason of conformance pack creation failure. + ConformancePackStatusReason *string `type:"string"` + + // Last time when conformation pack creation and update was successful. + LastUpdateCompletedTime *time.Time `type:"timestamp"` + + // Last time when conformation pack creation and update was requested. + // + // LastUpdateRequestedTime is a required field + LastUpdateRequestedTime *time.Time `type:"timestamp" required:"true"` + + // Amazon Resource Name (ARN) of AWS CloudFormation stack. + // + // StackArn is a required field + StackArn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConformancePackStatusDetail) String() string { + return awsutil.Prettify(s) +} + // The channel through which AWS Config delivers notifications and updated configuration // states. type DeliveryChannel struct { @@ -1449,7 +1719,7 @@ type OrganizationConfigRule struct { // The timestamp of the last update. LastUpdateTime *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of organization config rule. + // Amazon Resource Name (ARN) of organization config rule. // // OrganizationConfigRuleArn is a required field OrganizationConfigRuleArn *string `min:"1" type:"string" required:"true"` @@ -1535,6 +1805,188 @@ func (s OrganizationConfigRuleStatus) String() string { return awsutil.Prettify(s) } +// An organization conformance pack that has information about conformance packs +// that AWS Config creates in member accounts. +type OrganizationConformancePack struct { + _ struct{} `type:"structure"` + + // A list of ConformancePackInputParameter objects. + ConformancePackInputParameters []ConformancePackInputParameter `type:"list"` + + // Location of an Amazon S3 bucket where AWS Config can deliver evaluation results + // and conformance pack template that is used to create a pack. + // + // DeliveryS3Bucket is a required field + DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + + // Any folder structure you want to add to an Amazon S3 bucket. + DeliveryS3KeyPrefix *string `min:"1" type:"string"` + + // A comma-separated list of accounts excluded from organization conformance + // pack. + ExcludedAccounts []string `type:"list"` + + // Last time when organization conformation pack was updated. + // + // LastUpdateTime is a required field + LastUpdateTime *time.Time `type:"timestamp" required:"true"` + + // Amazon Resource Name (ARN) of organization conformance pack. + // + // OrganizationConformancePackArn is a required field + OrganizationConformancePackArn *string `min:"1" type:"string" required:"true"` + + // The name you assign to an organization conformance pack. + // + // OrganizationConformancePackName is a required field + OrganizationConformancePackName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s OrganizationConformancePack) String() string { + return awsutil.Prettify(s) +} + +// Organization conformance pack creation or deletion status in each member +// account. This includes the name of the conformance pack, the status, error +// code and error message when the conformance pack creation or deletion failed. 
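// A paging sketch for retrieving these per-account statuses (editor's
// illustration, not part of the generated code; the pack name is hypothetical,
// the usual imports are assumed, and the loop follows the NextToken fields on
// the request and response shapes added earlier in this diff):
//
//    func printMemberStatuses(cfg aws.Config, packName string) error {
//        svc := configservice.New(cfg)
//        var token *string
//        for {
//            req := svc.GetOrganizationConformancePackDetailedStatusRequest(&configservice.GetOrganizationConformancePackDetailedStatusInput{
//                OrganizationConformancePackName: aws.String(packName),
//                NextToken:                       token,
//            })
//            resp, err := req.Send(context.TODO())
//            if err != nil {
//                return err
//            }
//            for _, s := range resp.OrganizationConformancePackDetailedStatuses {
//                fmt.Printf("%s %s\n", *s.AccountId, s.Status)
//            }
//            if resp.NextToken == nil {
//                return nil
//            }
//            token = resp.NextToken
//        }
//    }
//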
+type OrganizationConformancePackDetailedStatus struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of a member account. + // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The name of conformance pack deployed in the member account. + // + // ConformancePackName is a required field + ConformancePackName *string `min:"1" type:"string" required:"true"` + + // An error code that is returned when conformance pack creation or deletion + // failed in the member account. + ErrorCode *string `type:"string"` + + // An error message indicating that conformance pack account creation or deletion + // has failed due to an error in the member account. + ErrorMessage *string `type:"string"` + + // The timestamp of the last status update. + LastUpdateTime *time.Time `type:"timestamp"` + + // Indicates deployment status for conformance pack in a member account. When + // master account calls PutOrganizationConformancePack action for the first + // time, conformance pack status is created in the member account. When master + // account calls PutOrganizationConformancePack action for the second time, + // conformance pack status is updated in the member account. Conformance pack + // status is deleted when the master account deletes OrganizationConformancePack + // and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the conformance pack to: + // + // * CREATE_SUCCESSFUL when conformance pack has been created in the member + // account. + // + // * CREATE_IN_PROGRESS when conformance pack is being created in the member + // account. + // + // * CREATE_FAILED when conformance pack creation has failed in the member + // account. + // + // * DELETE_FAILED when conformance pack deletion has failed in the member + // account. + // + // * DELETE_IN_PROGRESS when conformance pack is being deleted in the member + // account. + // + // * DELETE_SUCCESSFUL when conformance pack has been deleted in the member + // account. + // + // * UPDATE_SUCCESSFUL when conformance pack has been updated in the member + // account. + // + // * UPDATE_IN_PROGRESS when conformance pack is being updated in the member + // account. + // + // * UPDATE_FAILED when conformance pack deletion has failed in the member + // account. + // + // Status is a required field + Status OrganizationResourceDetailedStatus `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s OrganizationConformancePackDetailedStatus) String() string { + return awsutil.Prettify(s) +} + +// Returns the status for an organization conformance pack in an organization. +type OrganizationConformancePackStatus struct { + _ struct{} `type:"structure"` + + // An error code that is returned when organization conformance pack creation + // or deletion has failed in the member account. + ErrorCode *string `type:"string"` + + // An error message indicating that organization conformance pack creation or + // deletion failed due to an error. + ErrorMessage *string `type:"string"` + + // The timestamp of the last update. + LastUpdateTime *time.Time `type:"timestamp"` + + // The name that you assign to organization conformance pack. + // + // OrganizationConformancePackName is a required field + OrganizationConformancePackName *string `min:"1" type:"string" required:"true"` + + // Indicates deployment status of an organization conformance pack. 
When master + // account calls PutOrganizationConformancePack for the first time, conformance + // pack status is created in all the member accounts. When master account calls + // PutOrganizationConformancePack for the second time, conformance pack status + // is updated in all the member accounts. Additionally, conformance pack status + // is updated when one or more member accounts join or leave an organization. + // Conformance pack status is deleted when the master account deletes OrganizationConformancePack + // in all the member accounts and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the conformance pack to: + // + // * CREATE_SUCCESSFUL when an organization conformance pack has been successfully + // created in all the member accounts. + // + // * CREATE_IN_PROGRESS when an organization conformance pack creation is + // in progress. + // + // * CREATE_FAILED when an organization conformance pack creation failed + // in one or more member accounts within that organization. + // + // * DELETE_FAILED when an organization conformance pack deletion failed + // in one or more member accounts within that organization. + // + // * DELETE_IN_PROGRESS when an organization conformance pack deletion is + // in progress. + // + // * DELETE_SUCCESSFUL when an organization conformance pack has been successfully + // deleted from all the member accounts. + // + // * UPDATE_SUCCESSFUL when an organization conformance pack has been successfully + // updated in all the member accounts. + // + // * UPDATE_IN_PROGRESS when an organization conformance pack update is in + // progress. + // + // * UPDATE_FAILED when an organization conformance pack update failed in + // one or more member accounts within that organization. + // + // Status is a required field + Status OrganizationResourceStatus `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s OrganizationConformancePackStatus) String() string { + return awsutil.Prettify(s) +} + // An object that specifies organization custom rule metadata such as resource // type, resource ID of AWS resource, Lamdba function ARN, and organization // trigger types that trigger AWS Config to evaluate your AWS resources against @@ -1712,6 +2164,58 @@ func (s *OrganizationManagedRuleMetadata) Validate() error { return nil } +// Status filter object to filter results based on specific member account ID +// or status type for an organization conformance pack. +type OrganizationResourceDetailedStatusFilters struct { + _ struct{} `type:"structure"` + + // The 12-digit account ID of the member account within an organization. + AccountId *string `type:"string"` + + // Indicates deployment status for conformance pack in a member account. When + // master account calls PutOrganizationConformancePack action for the first + // time, conformance pack status is created in the member account. When master + // account calls PutOrganizationConformancePack action for the second time, + // conformance pack status is updated in the member account. Conformance pack + // status is deleted when the master account deletes OrganizationConformancePack + // and disables service access for config-multiaccountsetup.amazonaws.com. + // + // AWS Config sets the state of the conformance pack to: + // + // * CREATE_SUCCESSFUL when conformance pack has been created in the member + // account. + // + // * CREATE_IN_PROGRESS when conformance pack is being created in the member + // account. 
+ // + // * CREATE_FAILED when conformance pack creation has failed in the member + // account. + // + // * DELETE_FAILED when conformance pack deletion has failed in the member + // account. + // + // * DELETE_IN_PROGRESS when conformance pack is being deleted in the member + // account. + // + // * DELETE_SUCCESSFUL when conformance pack has been deleted in the member + // account. + // + // * UPDATE_SUCCESSFUL when conformance pack has been updated in the member + // account. + // + // * UPDATE_IN_PROGRESS when conformance pack is being updated in the member + // account. + // + // * UPDATE_FAILED when conformance pack deletion has failed in the member + // account. + Status OrganizationResourceDetailedStatus `type:"string" enum:"true"` +} + +// String returns the string representation +func (s OrganizationResourceDetailedStatusFilters) String() string { + return awsutil.Prettify(s) +} + // An object that represents the account ID and region of an aggregator account // that is requesting authorization but is not yet authorized. type PendingAggregationRequest struct { diff --git a/service/configservice/configserviceiface/interface.go b/service/configservice/configserviceiface/interface.go index 656f34b1da8..4e1d7b5c603 100644 --- a/service/configservice/configserviceiface/interface.go +++ b/service/configservice/configserviceiface/interface.go @@ -73,12 +73,16 @@ type ClientAPI interface { DeleteConfigurationRecorderRequest(*configservice.DeleteConfigurationRecorderInput) configservice.DeleteConfigurationRecorderRequest + DeleteConformancePackRequest(*configservice.DeleteConformancePackInput) configservice.DeleteConformancePackRequest + DeleteDeliveryChannelRequest(*configservice.DeleteDeliveryChannelInput) configservice.DeleteDeliveryChannelRequest DeleteEvaluationResultsRequest(*configservice.DeleteEvaluationResultsInput) configservice.DeleteEvaluationResultsRequest DeleteOrganizationConfigRuleRequest(*configservice.DeleteOrganizationConfigRuleInput) configservice.DeleteOrganizationConfigRuleRequest + DeleteOrganizationConformancePackRequest(*configservice.DeleteOrganizationConformancePackInput) configservice.DeleteOrganizationConformancePackRequest + DeletePendingAggregationRequestRequest(*configservice.DeletePendingAggregationRequestInput) configservice.DeletePendingAggregationRequestRequest DeleteRemediationConfigurationRequest(*configservice.DeleteRemediationConfigurationInput) configservice.DeleteRemediationConfigurationRequest @@ -109,6 +113,12 @@ type ClientAPI interface { DescribeConfigurationRecordersRequest(*configservice.DescribeConfigurationRecordersInput) configservice.DescribeConfigurationRecordersRequest + DescribeConformancePackComplianceRequest(*configservice.DescribeConformancePackComplianceInput) configservice.DescribeConformancePackComplianceRequest + + DescribeConformancePackStatusRequest(*configservice.DescribeConformancePackStatusInput) configservice.DescribeConformancePackStatusRequest + + DescribeConformancePacksRequest(*configservice.DescribeConformancePacksInput) configservice.DescribeConformancePacksRequest + DescribeDeliveryChannelStatusRequest(*configservice.DescribeDeliveryChannelStatusInput) configservice.DescribeDeliveryChannelStatusRequest DescribeDeliveryChannelsRequest(*configservice.DescribeDeliveryChannelsInput) configservice.DescribeDeliveryChannelsRequest @@ -117,6 +127,10 @@ type ClientAPI interface { DescribeOrganizationConfigRulesRequest(*configservice.DescribeOrganizationConfigRulesInput) configservice.DescribeOrganizationConfigRulesRequest + 
DescribeOrganizationConformancePackStatusesRequest(*configservice.DescribeOrganizationConformancePackStatusesInput) configservice.DescribeOrganizationConformancePackStatusesRequest + + DescribeOrganizationConformancePacksRequest(*configservice.DescribeOrganizationConformancePacksInput) configservice.DescribeOrganizationConformancePacksRequest + DescribePendingAggregationRequestsRequest(*configservice.DescribePendingAggregationRequestsInput) configservice.DescribePendingAggregationRequestsRequest DescribeRemediationConfigurationsRequest(*configservice.DescribeRemediationConfigurationsInput) configservice.DescribeRemediationConfigurationsRequest @@ -143,10 +157,16 @@ type ClientAPI interface { GetComplianceSummaryByResourceTypeRequest(*configservice.GetComplianceSummaryByResourceTypeInput) configservice.GetComplianceSummaryByResourceTypeRequest + GetConformancePackComplianceDetailsRequest(*configservice.GetConformancePackComplianceDetailsInput) configservice.GetConformancePackComplianceDetailsRequest + + GetConformancePackComplianceSummaryRequest(*configservice.GetConformancePackComplianceSummaryInput) configservice.GetConformancePackComplianceSummaryRequest + GetDiscoveredResourceCountsRequest(*configservice.GetDiscoveredResourceCountsInput) configservice.GetDiscoveredResourceCountsRequest GetOrganizationConfigRuleDetailedStatusRequest(*configservice.GetOrganizationConfigRuleDetailedStatusInput) configservice.GetOrganizationConfigRuleDetailedStatusRequest + GetOrganizationConformancePackDetailedStatusRequest(*configservice.GetOrganizationConformancePackDetailedStatusInput) configservice.GetOrganizationConformancePackDetailedStatusRequest + GetResourceConfigHistoryRequest(*configservice.GetResourceConfigHistoryInput) configservice.GetResourceConfigHistoryRequest ListAggregateDiscoveredResourcesRequest(*configservice.ListAggregateDiscoveredResourcesInput) configservice.ListAggregateDiscoveredResourcesRequest @@ -163,12 +183,16 @@ type ClientAPI interface { PutConfigurationRecorderRequest(*configservice.PutConfigurationRecorderInput) configservice.PutConfigurationRecorderRequest + PutConformancePackRequest(*configservice.PutConformancePackInput) configservice.PutConformancePackRequest + PutDeliveryChannelRequest(*configservice.PutDeliveryChannelInput) configservice.PutDeliveryChannelRequest PutEvaluationsRequest(*configservice.PutEvaluationsInput) configservice.PutEvaluationsRequest PutOrganizationConfigRuleRequest(*configservice.PutOrganizationConfigRuleInput) configservice.PutOrganizationConfigRuleRequest + PutOrganizationConformancePackRequest(*configservice.PutOrganizationConformancePackInput) configservice.PutOrganizationConformancePackRequest + PutRemediationConfigurationsRequest(*configservice.PutRemediationConfigurationsInput) configservice.PutRemediationConfigurationsRequest PutRemediationExceptionsRequest(*configservice.PutRemediationExceptionsInput) configservice.PutRemediationExceptionsRequest diff --git a/service/connect/api_op_CreateUser.go b/service/connect/api_op_CreateUser.go index 62bcfd76a4c..a3d72614788 100644 --- a/service/connect/api_op_CreateUser.go +++ b/service/connect/api_op_CreateUser.go @@ -56,6 +56,9 @@ type CreateUserInput struct { // SecurityProfileIds is a required field SecurityProfileIds []string `min:"1" type:"list" required:"true"` + // One or more tags. + Tags map[string]string `min:"1" type:"map"` + // The user name for the account. For instances not using SAML for identity // management, the user name can include up to 20 characters. 
If you are using // SAML for identity management, the user name can include up to 64 characters @@ -95,6 +98,9 @@ func (s *CreateUserInput) Validate() error { if s.SecurityProfileIds != nil && len(s.SecurityProfileIds) < 1 { invalidParams.Add(aws.NewErrParamMinLen("SecurityProfileIds", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } if s.Username == nil { invalidParams.Add(aws.NewErrParamRequired("Username")) @@ -170,6 +176,18 @@ func (s CreateUserInput) MarshalFields(e protocol.FieldEncoder) error { } ls0.End() + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + } if s.Username != nil { v := *s.Username diff --git a/service/connect/api_op_DescribeUser.go b/service/connect/api_op_DescribeUser.go index 64755f41a90..0772c82f4ad 100644 --- a/service/connect/api_op_DescribeUser.go +++ b/service/connect/api_op_DescribeUser.go @@ -97,7 +97,9 @@ const opDescribeUser = "DescribeUser" // DescribeUserRequest returns a request value for making API operation for // Amazon Connect Service. // -// Describes the specified user account. +// Describes the specified user account. You can find the instance ID in the +// console (it’s the final part of the ARN). The console does not display +// the user IDs. Instead, list the users and note the IDs provided in the output. // // // Example sending a request using DescribeUserRequest. // req := client.DescribeUserRequest(params) diff --git a/service/connect/api_op_ListTagsForResource.go b/service/connect/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..da9fe9beb16 --- /dev/null +++ b/service/connect/api_op_ListTagsForResource.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package connect + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + Tags map[string]string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// Amazon Connect Service. +// +// Lists the tags for the specified resource. +// +// // Example sending a request using ListTagsForResourceRequest. +// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. +func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. 
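// A usage sketch for this operation (editor's illustration, not part of the
// generated code; the resource ARN is hypothetical and the usual imports are
// assumed):
//
//    func showTags(cfg aws.Config, resourceArn string) error {
//        svc := connect.New(cfg)
//        req := svc.ListTagsForResourceRequest(&connect.ListTagsForResourceInput{
//            ResourceArn: aws.String(resourceArn),
//        })
//        resp, err := req.Send(context.TODO())
//        if err != nil {
//            return err
//        }
//        for k, v := range resp.Tags {
//            fmt.Printf("%s=%s\n", k, v)
//        }
//        return nil
//    }
//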
+func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/connect/api_op_TagResource.go b/service/connect/api_op_TagResource.go new file mode 100644 index 00000000000..896cea90ae6 --- /dev/null +++ b/service/connect/api_op_TagResource.go @@ -0,0 +1,164 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package connect + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // One or more tags. For example, { "tags": {"key1":"value1", "key2":"value2"} + // }. + // + // Tags is a required field + Tags map[string]string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// Amazon Connect Service. +// +// Adds the specified tags to the specified resource. +// +// The supported resource type is users. +// +// // Example sending a request using TagResourceRequest. 
+// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. +func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. +func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/connect/api_op_UntagResource.go b/service/connect/api_op_UntagResource.go new file mode 100644 index 00000000000..0ad05d066ce --- /dev/null +++ b/service/connect/api_op_UntagResource.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package connect + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tag keys. + // + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
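// A companion sketch for tagging and then untagging a resource (editor's
// illustration, not part of the generated code; svc is a *connect.Client and
// resourceArn is hypothetical, as in the earlier ListTagsForResource sketch):
//
//    tagReq := svc.TagResourceRequest(&connect.TagResourceInput{
//        ResourceArn: aws.String(resourceArn),
//        Tags:        map[string]string{"team": "support"},
//    })
//    if _, err := tagReq.Send(context.TODO()); err != nil {
//        return err
//    }
//    untagReq := svc.UntagResourceRequest(&connect.UntagResourceInput{
//        ResourceArn: aws.String(resourceArn),
//        TagKeys:     []string{"team"},
//    })
//    if _, err := untagReq.Send(context.TODO()); err != nil {
//        return err
//    }
//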
+func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "tagKeys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// Amazon Connect Service. +// +// Removes the specified tags from the specified resource. +// +// // Example sending a request using UntagResourceRequest. +// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. +func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/connect/api_types.go b/service/connect/api_types.go index 318fc41a8cf..e4ab9a5956f 100644 --- a/service/connect/api_types.go +++ b/service/connect/api_types.go @@ -1033,6 +1033,9 @@ type User struct { // The identifiers of the security profiles for the user. SecurityProfileIds []string `min:"1" type:"list"` + // The tags. 
+ Tags map[string]string `min:"1" type:"map"` + // The user name assigned to the user account. Username *string `min:"1" type:"string"` } @@ -1097,6 +1100,18 @@ func (s User) MarshalFields(e protocol.FieldEncoder) error { } ls0.End() + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + } if s.Username != nil { v := *s.Username diff --git a/service/connect/connectiface/interface.go b/service/connect/connectiface/interface.go index 8a2dd721352..07f73a18de5 100644 --- a/service/connect/connectiface/interface.go +++ b/service/connect/connectiface/interface.go @@ -91,6 +91,8 @@ type ClientAPI interface { ListSecurityProfilesRequest(*connect.ListSecurityProfilesInput) connect.ListSecurityProfilesRequest + ListTagsForResourceRequest(*connect.ListTagsForResourceInput) connect.ListTagsForResourceRequest + ListUserHierarchyGroupsRequest(*connect.ListUserHierarchyGroupsInput) connect.ListUserHierarchyGroupsRequest ListUsersRequest(*connect.ListUsersInput) connect.ListUsersRequest @@ -99,6 +101,10 @@ type ClientAPI interface { StopContactRequest(*connect.StopContactInput) connect.StopContactRequest + TagResourceRequest(*connect.TagResourceInput) connect.TagResourceRequest + + UntagResourceRequest(*connect.UntagResourceInput) connect.UntagResourceRequest + UpdateContactAttributesRequest(*connect.UpdateContactAttributesInput) connect.UpdateContactAttributesRequest UpdateUserHierarchyRequest(*connect.UpdateUserHierarchyInput) connect.UpdateUserHierarchyRequest diff --git a/service/costexplorer/api_op_GetCostAndUsage.go b/service/costexplorer/api_op_GetCostAndUsage.go index 8b297b8c8a1..2795bacbe70 100644 --- a/service/costexplorer/api_op_GetCostAndUsage.go +++ b/service/costexplorer/api_op_GetCostAndUsage.go @@ -21,8 +21,6 @@ type GetCostAndUsageInput struct { // Sets the AWS cost granularity to MONTHLY or DAILY, or HOURLY. If Granularity // isn't set, the response object doesn't include the Granularity, either MONTHLY // or DAILY, or HOURLY. - // - // The GetCostAndUsageRequest operation supports only DAILY and MONTHLY granularities. Granularity Granularity `type:"string" enum:"true"` // You can group AWS costs using up to two different groups, either dimensions, @@ -31,7 +29,7 @@ type GetCostAndUsageInput struct { // When you group by tag key, you get all tag values, including empty strings. // // Valid values are AZ, INSTANCE_TYPE, LEGAL_ENTITY_NAME, LINKED_ACCOUNT, OPERATION, - // PLATFORM, PURCHASE_TYPE, SERVICE, TAGS, TENANCY, and USAGE_TYPE. + // PLATFORM, PURCHASE_TYPE, SERVICE, TAGS, TENANCY, RECORD_TYPE, and USAGE_TYPE. GroupBy []GroupDefinition `type:"list"` // Which metrics are returned in the query. For more information about blended diff --git a/service/costexplorer/api_types.go b/service/costexplorer/api_types.go index 8d252a2e029..2539db8e58b 100644 --- a/service/costexplorer/api_types.go +++ b/service/costexplorer/api_types.go @@ -1150,6 +1150,11 @@ type SavingsPlansPurchaseRecommendationDetail struct { // Savings Plans, over the length of the lookback period. EstimatedOnDemandCost *string `type:"string"` + // The estimated On-Demand costs you would expect with no additional commitment, + // based on your usage of the selected time period and the Savings Plans you + // own. 
+ EstimatedOnDemandCostWithCurrentCommitment *string `type:"string"` + // The estimated return on investment based on the recommended Savings Plans // purchased. This is calculated as estimatedSavingsAmount/ estimatedSPCost*100. EstimatedROI *string `type:"string"` @@ -1218,6 +1223,11 @@ type SavingsPlansPurchaseRecommendationSummary struct { // purchase. EstimatedMonthlySavingsAmount *string `type:"string"` + // The estimated On-Demand costs you would expect with no additional commitment, + // based on your usage of the selected time period and the Savings Plans you + // own. + EstimatedOnDemandCostWithCurrentCommitment *string `type:"string"` + // The estimated return on investment based on the recommended Savings Plans // and estimated savings. EstimatedROI *string `type:"string"` diff --git a/service/dataexchange/api_client.go b/service/dataexchange/api_client.go new file mode 100644 index 00000000000..67d37eaf6b0 --- /dev/null +++ b/service/dataexchange/api_client.go @@ -0,0 +1,79 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +// Client provides the API operation methods for making requests to +// AWS Data Exchange. See this package's package overview docs +// for details on the service. +// +// The client's methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type Client struct { + *aws.Client +} + +// Used for custom client initialization logic +var initClient func(*Client) + +// Used for custom request initialization logic +var initRequest func(*Client, *aws.Request) + +const ( + ServiceName = "AWS Data Exchange" // Service's name + ServiceID = "DataExchange" // Service's identifier + EndpointsID = "dataexchange" // Service's Endpoint identifier +) + +// New creates a new instance of the client from the provided Config. +// +// Example: +// // Create a client from just a config. +// svc := dataexchange.New(myConfig) +func New(config aws.Config) *Client { + svc := &Client{ + Client: aws.NewClient( + config, + aws.Metadata{ + ServiceName: ServiceName, + ServiceID: ServiceID, + EndpointsID: EndpointsID, + SigningName: "dataexchange", + SigningRegion: config.Region, + APIVersion: "2017-07-25", + }, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc) + } + + return svc +} + +// newRequest creates a new request for a client operation and runs any +// custom request initialization. +func (c *Client) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(c, req) + } + + return req +} diff --git a/service/dataexchange/api_doc.go b/service/dataexchange/api_doc.go new file mode 100644 index 00000000000..ef946707ff0 --- /dev/null +++ b/service/dataexchange/api_doc.go @@ -0,0 +1,28 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
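//
// A short sketch expanding on the New example above: load shared configuration,
// construct the AWS Data Exchange client, and reuse it for the Request/Send
// calls shown for each operation below. external.LoadDefaultAWSConfig comes
// from this SDK's aws/external package; the region override is only illustrative.
//
//     import (
//         "github.com/aws/aws-sdk-go-v2/aws/external"
//         "github.com/aws/aws-sdk-go-v2/service/dataexchange"
//     )
//
//     cfg, err := external.LoadDefaultAWSConfig()
//     if err != nil {
//         // handle configuration error
//     }
//     cfg.Region = "us-east-1"
//
//     svc := dataexchange.New(cfg)
//     // svc is safe for concurrent use; every operation is invoked as
//     // svc.<Operation>Request(input).Send(ctx).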
+ +// Package dataexchange provides the client and types for making API +// requests to AWS Data Exchange. +// +// This is the API reference for AWS Data Exchange. +// +// See https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25 for more information on this service. +// +// See dataexchange package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dataexchange/ +// +// Using the Client +// +// To use AWS Data Exchange with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Data Exchange client for more information on +// creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dataexchange/#New +package dataexchange diff --git a/service/dataexchange/api_enums.go b/service/dataexchange/api_enums.go new file mode 100644 index 00000000000..9291488aee4 --- /dev/null +++ b/service/dataexchange/api_enums.go @@ -0,0 +1,186 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +// The type of file your data is stored in. Currently, the supported asset type +// is S3_SNAPSHOT. +type AssetType string + +// Enum values for AssetType +const ( + AssetTypeS3Snapshot AssetType = "S3_SNAPSHOT" +) + +func (enum AssetType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AssetType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type Code string + +// Enum values for Code +const ( + CodeAccessDeniedException Code = "ACCESS_DENIED_EXCEPTION" + CodeInternalServerException Code = "INTERNAL_SERVER_EXCEPTION" + CodeMalwareDetected Code = "MALWARE_DETECTED" + CodeResourceNotFoundException Code = "RESOURCE_NOT_FOUND_EXCEPTION" + CodeServiceQuotaExceededException Code = "SERVICE_QUOTA_EXCEEDED_EXCEPTION" + CodeValidationException Code = "VALIDATION_EXCEPTION" + CodeMalwareScanEncryptedFile Code = "MALWARE_SCAN_ENCRYPTED_FILE" +) + +func (enum Code) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Code) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The name of the limit that was reached. +type JobErrorLimitName string + +// Enum values for JobErrorLimitName +const ( + JobErrorLimitNameAssetsperrevision JobErrorLimitName = "Assets per revision" + JobErrorLimitNameAssetsizeinGb JobErrorLimitName = "Asset size in GB" +) + +func (enum JobErrorLimitName) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum JobErrorLimitName) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The types of resource which the job error can apply to. 
+type JobErrorResourceTypes string + +// Enum values for JobErrorResourceTypes +const ( + JobErrorResourceTypesRevision JobErrorResourceTypes = "REVISION" + JobErrorResourceTypesAsset JobErrorResourceTypes = "ASSET" +) + +func (enum JobErrorResourceTypes) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum JobErrorResourceTypes) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type LimitName string + +// Enum values for LimitName +const ( + LimitNameProductsperaccount LimitName = "Products per account" + LimitNameDatasetsperaccount LimitName = "Data sets per account" + LimitNameDatasetsperproduct LimitName = "Data sets per product" + LimitNameRevisionsperdataset LimitName = "Revisions per data set" + LimitNameAssetsperrevision LimitName = "Assets per revision" + LimitNameAssetsperimportjobfromAmazonS3 LimitName = "Assets per import job from Amazon S3" + LimitNameAssetperexportjobfromAmazonS3 LimitName = "Asset per export job from Amazon S3" + LimitNameAssetsizeinGb LimitName = "Asset size in GB" + LimitNameConcurrentinprogressjobstoimportassetsfromAmazonS3 LimitName = "Concurrent in progress jobs to import assets from Amazon S3" + LimitNameConcurrentinprogressjobstoimportassetsfromasignedUrl LimitName = "Concurrent in progress jobs to import assets from a signed URL" + LimitNameConcurrentinprogressjobstoexportassetstoAmazonS3 LimitName = "Concurrent in progress jobs to export assets to Amazon S3" + LimitNameConcurrentinprogressjobstoexportassetstoasignedUrl LimitName = "Concurrent in progress jobs to export assets to a signed URL" +) + +func (enum LimitName) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum LimitName) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// A property that defines the data set as OWNED by the account (for providers) +// or ENTITLED to the account (for subscribers). When an owned data set is published +// in a product, AWS Data Exchange creates a copy of the data set. Subscribers +// can access that copy of the data set as an entitled data set. 
+type Origin string + +// Enum values for Origin +const ( + OriginOwned Origin = "OWNED" + OriginEntitled Origin = "ENTITLED" +) + +func (enum Origin) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Origin) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ResourceType string + +// Enum values for ResourceType +const ( + ResourceTypeDataSet ResourceType = "DATA_SET" + ResourceTypeRevision ResourceType = "REVISION" + ResourceTypeAsset ResourceType = "ASSET" + ResourceTypeJob ResourceType = "JOB" +) + +func (enum ResourceType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ResourceType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type State string + +// Enum values for State +const ( + StateWaiting State = "WAITING" + StateInProgress State = "IN_PROGRESS" + StateError State = "ERROR" + StateCompleted State = "COMPLETED" + StateCancelled State = "CANCELLED" + StateTimedOut State = "TIMED_OUT" +) + +func (enum State) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum State) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type Type string + +// Enum values for Type +const ( + TypeImportAssetsFromS3 Type = "IMPORT_ASSETS_FROM_S3" + TypeImportAssetFromSignedUrl Type = "IMPORT_ASSET_FROM_SIGNED_URL" + TypeExportAssetsToS3 Type = "EXPORT_ASSETS_TO_S3" + TypeExportAssetToSignedUrl Type = "EXPORT_ASSET_TO_SIGNED_URL" +) + +func (enum Type) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Type) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/dataexchange/api_errors.go b/service/dataexchange/api_errors.go new file mode 100644 index 00000000000..3264cff683c --- /dev/null +++ b/service/dataexchange/api_errors.go @@ -0,0 +1,49 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // Access to the resource is denied. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // The request couldn't be completed because it conflicted with the current + // state of the resource. + ErrCodeConflictException = "ConflictException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // An exception occurred with the service. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource couldn't be found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeServiceLimitExceededException for service response error code + // "ServiceLimitExceededException". + // + // The request has exceeded the quotas imposed by the service. + ErrCodeServiceLimitExceededException = "ServiceLimitExceededException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The limit on the number of requests per second was exceeded. + ErrCodeThrottlingException = "ThrottlingException" + + // ErrCodeValidationException for service response error code + // "ValidationException". 
+ // + // The request was invalid. + ErrCodeValidationException = "ValidationException" +) diff --git a/service/dataexchange/api_op_CancelJob.go b/service/dataexchange/api_op_CancelJob.go new file mode 100644 index 00000000000..98872d0c242 --- /dev/null +++ b/service/dataexchange/api_op_CancelJob.go @@ -0,0 +1,136 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type CancelJobInput struct { + _ struct{} `type:"structure"` + + // JobId is a required field + JobId *string `location:"uri" locationName:"JobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CancelJobInput"} + + if s.JobId == nil { + invalidParams.Add(aws.NewErrParamRequired("JobId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CancelJobInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.JobId != nil { + v := *s.JobId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "JobId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CancelJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelJobOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CancelJobOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opCancelJob = "CancelJob" + +// CancelJobRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation cancels a job. Jobs can be cancelled only when they are in +// the WAITING state. +// +// // Example sending a request using CancelJobRequest. +// req := client.CancelJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/CancelJob +func (c *Client) CancelJobRequest(input *CancelJobInput) CancelJobRequest { + op := &aws.Operation{ + Name: opCancelJob, + HTTPMethod: "DELETE", + HTTPPath: "/v1/jobs/{JobId}", + } + + if input == nil { + input = &CancelJobInput{} + } + + req := c.newRequest(op, input, &CancelJobOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return CancelJobRequest{Request: req, Input: input, Copy: c.CancelJobRequest} +} + +// CancelJobRequest is the request type for the +// CancelJob API operation. +type CancelJobRequest struct { + *aws.Request + Input *CancelJobInput + Copy func(*CancelJobInput) CancelJobRequest +} + +// Send marshals and sends the CancelJob API request. 
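//
// A usage sketch for CancelJob: only jobs still in the WAITING state can be
// cancelled, so a ConflictException is treated as "too late to cancel" rather
// than a hard failure. The job ID is a placeholder and awserr is this SDK's
// aws/awserr package.
//
//     _, err := client.CancelJobRequest(&dataexchange.CancelJobInput{
//         JobId: aws.String("example-job-id"),
//     }).Send(context.TODO())
//     if err != nil {
//         if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dataexchange.ErrCodeConflictException {
//             // the job had already left the WAITING state
//         } else {
//             // handle other errors
//         }
//     }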
+func (r CancelJobRequest) Send(ctx context.Context) (*CancelJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CancelJobResponse{ + CancelJobOutput: r.Request.Data.(*CancelJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CancelJobResponse is the response type for the +// CancelJob API operation. +type CancelJobResponse struct { + *CancelJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CancelJob request. +func (r *CancelJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_CreateDataSet.go b/service/dataexchange/api_op_CreateDataSet.go new file mode 100644 index 00000000000..6f789abc49e --- /dev/null +++ b/service/dataexchange/api_op_CreateDataSet.go @@ -0,0 +1,294 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to create a data set that contains one or more revisions. +type CreateDataSetInput struct { + _ struct{} `type:"structure"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + // + // AssetType is a required field + AssetType AssetType `type:"string" required:"true" enum:"true"` + + // A description for the data set. This value can be up to 16,348 characters + // long. + // + // Description is a required field + Description *string `type:"string" required:"true"` + + // The name of the data set. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A data set tag is an optional label that you can assign to a data set when + // you create it. Each tag consists of a key and an optional value, both of + // which you define. When you use tagging, you can also use tag-based access + // control in IAM policies to control access to these data sets and revisions. + Tags map[string]string `type:"map"` +} + +// String returns the string representation +func (s CreateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDataSetInput"} + if len(s.AssetType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("AssetType")) + } + + if s.Description == nil { + invalidParams.Add(aws.NewErrParamRequired("Description")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + return nil +} + +type CreateDataSetOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + AssetType AssetType `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description of a resource. + Description *string `type:"string"` + + // A unique identifier. + Id *string `type:"string"` + + // The name of the model. + Name *string `type:"string"` + + // A property that defines the data set as OWNED by the account (for providers) + // or ENTITLED to the account (for subscribers). When an owned data set is published + // in a product, AWS Data Exchange creates a copy of the data set. Subscribers + // can access that copy of the data set as an entitled data set. + Origin Origin `type:"string" enum:"true"` + + OriginDetails *OriginDetails `type:"structure"` + + // A unique identifier. + SourceId *string `type:"string"` + + Tags map[string]string `type:"map"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CreateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Origin) > 0 { + v := s.Origin + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Origin", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.OriginDetails != nil { + v := s.OriginDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OriginDetails", v, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opCreateDataSet = "CreateDataSet" + +// CreateDataSetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation creates a data set. +// +// // Example sending a request using CreateDataSetRequest. +// req := client.CreateDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/CreateDataSet +func (c *Client) CreateDataSetRequest(input *CreateDataSetInput) CreateDataSetRequest { + op := &aws.Operation{ + Name: opCreateDataSet, + HTTPMethod: "POST", + HTTPPath: "/v1/data-sets", + } + + if input == nil { + input = &CreateDataSetInput{} + } + + req := c.newRequest(op, input, &CreateDataSetOutput{}) + return CreateDataSetRequest{Request: req, Input: input, Copy: c.CreateDataSetRequest} +} + +// CreateDataSetRequest is the request type for the +// CreateDataSet API operation. +type CreateDataSetRequest struct { + *aws.Request + Input *CreateDataSetInput + Copy func(*CreateDataSetInput) CreateDataSetRequest +} + +// Send marshals and sends the CreateDataSet API request. 
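//
// A minimal sketch of creating a data set with the required fields and one
// optional tag; the values are placeholders.
//
//     resp, err := client.CreateDataSetRequest(&dataexchange.CreateDataSetInput{
//         AssetType:   dataexchange.AssetTypeS3Snapshot,
//         Name:        aws.String("example-data-set"),
//         Description: aws.String("Example data set created from the SDK"),
//         Tags:        map[string]string{"team": "data"},
//     }).Send(context.TODO())
//     if err != nil {
//         // handle error
//     }
//     fmt.Println(*resp.Id, *resp.Arn, resp.Origin) // newly created data sets are OWNED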
+func (r CreateDataSetRequest) Send(ctx context.Context) (*CreateDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDataSetResponse{ + CreateDataSetOutput: r.Request.Data.(*CreateDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDataSetResponse is the response type for the +// CreateDataSet API operation. +type CreateDataSetResponse struct { + *CreateDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDataSet request. +func (r *CreateDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_CreateJob.go b/service/dataexchange/api_op_CreateJob.go new file mode 100644 index 00000000000..6e5dbf95186 --- /dev/null +++ b/service/dataexchange/api_op_CreateJob.go @@ -0,0 +1,236 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// The CreateJob request. AWS Data Exchange Jobs are asynchronous import or +// export operations used to create or copy assets. A data set owner can both +// import and export as they see fit. Someone with an entitlement to a data +// set can only export. Jobs are deleted 90 days after they are created. Created +// jobs must be started with the StartJob operation. +type CreateJobInput struct { + _ struct{} `type:"structure"` + + // The details for the CreateJob request. + // + // Details is a required field + Details *RequestDetails `type:"structure" required:"true"` + + // The type of job to be created. + // + // Type is a required field + Type Type `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s CreateJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateJobInput"} + + if s.Details == nil { + invalidParams.Add(aws.NewErrParamRequired("Details")) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + if s.Details != nil { + if err := s.Details.Validate(); err != nil { + invalidParams.AddNested("Details", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateJobInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Details != nil { + v := s.Details + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Details", v, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +type CreateJobOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. 
+ CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Details for the response. + Details *ResponseDetails `type:"structure"` + + Errors []JobError `type:"list"` + + // A unique identifier. + Id *string `type:"string"` + + State State `type:"string" enum:"true"` + + Type Type `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CreateJobOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateJobOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Details != nil { + v := s.Details + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Details", v, metadata) + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.State) > 0 { + v := s.State + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opCreateJob = "CreateJob" + +// CreateJobRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation creates a job. +// +// // Example sending a request using CreateJobRequest. +// req := client.CreateJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/CreateJob +func (c *Client) CreateJobRequest(input *CreateJobInput) CreateJobRequest { + op := &aws.Operation{ + Name: opCreateJob, + HTTPMethod: "POST", + HTTPPath: "/v1/jobs", + } + + if input == nil { + input = &CreateJobInput{} + } + + req := c.newRequest(op, input, &CreateJobOutput{}) + return CreateJobRequest{Request: req, Input: input, Copy: c.CreateJobRequest} +} + +// CreateJobRequest is the request type for the +// CreateJob API operation. +type CreateJobRequest struct { + *aws.Request + Input *CreateJobInput + Copy func(*CreateJobInput) CreateJobRequest +} + +// Send marshals and sends the CreateJob API request. 
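//
// A sketch of creating an import job. Details must be populated to match Type;
// the nested members of RequestDetails are not shown in this file, so they are
// left as a placeholder comment rather than guessed at. Remember that a created
// job stays in the WAITING state until it is started with StartJob.
//
//     resp, err := client.CreateJobRequest(&dataexchange.CreateJobInput{
//         Type: dataexchange.TypeImportAssetsFromS3,
//         Details: &dataexchange.RequestDetails{
//             // fill in the member that corresponds to Type, e.g. the
//             // import-from-Amazon-S3 details (asset sources, data set ID, revision ID)
//         },
//     }).Send(context.TODO())
//     if err != nil {
//         // handle error
//     }
//     fmt.Println(*resp.Id, resp.State) // expected to be WAITING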
+func (r CreateJobRequest) Send(ctx context.Context) (*CreateJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateJobResponse{ + CreateJobOutput: r.Request.Data.(*CreateJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateJobResponse is the response type for the +// CreateJob API operation. +type CreateJobResponse struct { + *CreateJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateJob request. +func (r *CreateJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_CreateRevision.go b/service/dataexchange/api_op_CreateRevision.go new file mode 100644 index 00000000000..593e50cac06 --- /dev/null +++ b/service/dataexchange/api_op_CreateRevision.go @@ -0,0 +1,250 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// Creates a revision for a data set. When they're created, revisions are not +// published to products, and therefore are not available to subscribers. To +// publish a revision to a data set in a product, the revision must first be +// finalized. +type CreateRevisionInput struct { + _ struct{} `type:"structure"` + + // An optional comment about the revision. + Comment *string `type:"string"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // A revision tag is an optional label that you can assign to a revision when + // you create it. Each tag consists of a key and an optional value, both of + // which you define. When you use tagging, you can also use tag-based access + // control in IAM policies to control access to these data sets and revisions. + Tags map[string]string `type:"map"` +} + +// String returns the string representation +func (s CreateRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRevisionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRevisionInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateRevisionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Comment != nil { + v := *s.Comment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateRevisionOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + Comment *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A unique identifier. + DataSetId *string `type:"string"` + + Finalized *bool `type:"boolean"` + + // A unique identifier. + Id *string `type:"string"` + + // A unique identifier. + SourceId *string `type:"string"` + + Tags map[string]string `type:"map"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CreateRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateRevisionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Comment != nil { + v := *s.Comment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Finalized != nil { + v := *s.Finalized + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Finalized", protocol.BoolValue(v), metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opCreateRevision = "CreateRevision" + +// CreateRevisionRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation creates a revision for a data set. +// +// // Example sending a request using CreateRevisionRequest. +// req := client.CreateRevisionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/CreateRevision +func (c *Client) CreateRevisionRequest(input *CreateRevisionInput) CreateRevisionRequest { + op := &aws.Operation{ + Name: opCreateRevision, + HTTPMethod: "POST", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions", + } + + if input == nil { + input = &CreateRevisionInput{} + } + + req := c.newRequest(op, input, &CreateRevisionOutput{}) + return CreateRevisionRequest{Request: req, Input: input, Copy: c.CreateRevisionRequest} +} + +// CreateRevisionRequest is the request type for the +// CreateRevision API operation. +type CreateRevisionRequest struct { + *aws.Request + Input *CreateRevisionInput + Copy func(*CreateRevisionInput) CreateRevisionRequest +} + +// Send marshals and sends the CreateRevision API request. 
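//
// A minimal sketch of adding a revision to an existing data set; the data set
// ID is a placeholder. A newly created revision is not finalized, and an
// unfinalized revision cannot be published to a product.
//
//     resp, err := client.CreateRevisionRequest(&dataexchange.CreateRevisionInput{
//         DataSetId: aws.String("example-data-set-id"),
//         Comment:   aws.String("Initial revision"),
//     }).Send(context.TODO())
//     if err != nil {
//         // handle error
//     }
//     fmt.Println(*resp.Id) // use this revision ID when importing assets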
+func (r CreateRevisionRequest) Send(ctx context.Context) (*CreateRevisionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateRevisionResponse{ + CreateRevisionOutput: r.Request.Data.(*CreateRevisionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateRevisionResponse is the response type for the +// CreateRevision API operation. +type CreateRevisionResponse struct { + *CreateRevisionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateRevision request. +func (r *CreateRevisionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_DeleteAsset.go b/service/dataexchange/api_op_DeleteAsset.go new file mode 100644 index 00000000000..283328ee7c7 --- /dev/null +++ b/service/dataexchange/api_op_DeleteAsset.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteAssetInput struct { + _ struct{} `type:"structure"` + + // AssetId is a required field + AssetId *string `location:"uri" locationName:"AssetId" type:"string" required:"true"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAssetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAssetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteAssetInput"} + + if s.AssetId == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetId")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteAssetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteAssetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAssetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteAssetOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteAsset = "DeleteAsset" + +// DeleteAssetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation deletes an asset. +// +// // Example sending a request using DeleteAssetRequest. +// req := client.DeleteAssetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/DeleteAsset +func (c *Client) DeleteAssetRequest(input *DeleteAssetInput) DeleteAssetRequest { + op := &aws.Operation{ + Name: opDeleteAsset, + HTTPMethod: "DELETE", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + } + + if input == nil { + input = &DeleteAssetInput{} + } + + req := c.newRequest(op, input, &DeleteAssetOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteAssetRequest{Request: req, Input: input, Copy: c.DeleteAssetRequest} +} + +// DeleteAssetRequest is the request type for the +// DeleteAsset API operation. +type DeleteAssetRequest struct { + *aws.Request + Input *DeleteAssetInput + Copy func(*DeleteAssetInput) DeleteAssetRequest +} + +// Send marshals and sends the DeleteAsset API request. +func (r DeleteAssetRequest) Send(ctx context.Context) (*DeleteAssetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAssetResponse{ + DeleteAssetOutput: r.Request.Data.(*DeleteAssetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAssetResponse is the response type for the +// DeleteAsset API operation. +type DeleteAssetResponse struct { + *DeleteAssetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAsset request. +func (r *DeleteAssetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_DeleteDataSet.go b/service/dataexchange/api_op_DeleteDataSet.go new file mode 100644 index 00000000000..4d43e53d980 --- /dev/null +++ b/service/dataexchange/api_op_DeleteDataSet.go @@ -0,0 +1,135 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteDataSetInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteDataSetInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteDataSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteDataSet = "DeleteDataSet" + +// DeleteDataSetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation deletes a data set. +// +// // Example sending a request using DeleteDataSetRequest. +// req := client.DeleteDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/DeleteDataSet +func (c *Client) DeleteDataSetRequest(input *DeleteDataSetInput) DeleteDataSetRequest { + op := &aws.Operation{ + Name: opDeleteDataSet, + HTTPMethod: "DELETE", + HTTPPath: "/v1/data-sets/{DataSetId}", + } + + if input == nil { + input = &DeleteDataSetInput{} + } + + req := c.newRequest(op, input, &DeleteDataSetOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteDataSetRequest{Request: req, Input: input, Copy: c.DeleteDataSetRequest} +} + +// DeleteDataSetRequest is the request type for the +// DeleteDataSet API operation. +type DeleteDataSetRequest struct { + *aws.Request + Input *DeleteDataSetInput + Copy func(*DeleteDataSetInput) DeleteDataSetRequest +} + +// Send marshals and sends the DeleteDataSet API request. 
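//
// A sketch of deleting a data set while tolerating the case where it no longer
// exists. The ID is a placeholder and awserr is this SDK's aws/awserr package.
//
//     _, err := client.DeleteDataSetRequest(&dataexchange.DeleteDataSetInput{
//         DataSetId: aws.String("example-data-set-id"),
//     }).Send(context.TODO())
//     if err != nil {
//         if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dataexchange.ErrCodeResourceNotFoundException {
//             // already deleted; nothing to do
//         } else {
//             // handle other errors
//         }
//     }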
+func (r DeleteDataSetRequest) Send(ctx context.Context) (*DeleteDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteDataSetResponse{ + DeleteDataSetOutput: r.Request.Data.(*DeleteDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteDataSetResponse is the response type for the +// DeleteDataSet API operation. +type DeleteDataSetResponse struct { + *DeleteDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteDataSet request. +func (r *DeleteDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_DeleteRevision.go b/service/dataexchange/api_op_DeleteRevision.go new file mode 100644 index 00000000000..e5cc7b23cde --- /dev/null +++ b/service/dataexchange/api_op_DeleteRevision.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type DeleteRevisionInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRevisionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRevisionInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteRevisionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteRevisionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteRevisionOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteRevision = "DeleteRevision" + +// DeleteRevisionRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation deletes a revision. +// +// // Example sending a request using DeleteRevisionRequest. 
+// req := client.DeleteRevisionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/DeleteRevision +func (c *Client) DeleteRevisionRequest(input *DeleteRevisionInput) DeleteRevisionRequest { + op := &aws.Operation{ + Name: opDeleteRevision, + HTTPMethod: "DELETE", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + } + + if input == nil { + input = &DeleteRevisionInput{} + } + + req := c.newRequest(op, input, &DeleteRevisionOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRevisionRequest{Request: req, Input: input, Copy: c.DeleteRevisionRequest} +} + +// DeleteRevisionRequest is the request type for the +// DeleteRevision API operation. +type DeleteRevisionRequest struct { + *aws.Request + Input *DeleteRevisionInput + Copy func(*DeleteRevisionInput) DeleteRevisionRequest +} + +// Send marshals and sends the DeleteRevision API request. +func (r DeleteRevisionRequest) Send(ctx context.Context) (*DeleteRevisionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRevisionResponse{ + DeleteRevisionOutput: r.Request.Data.(*DeleteRevisionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRevisionResponse is the response type for the +// DeleteRevision API operation. +type DeleteRevisionResponse struct { + *DeleteRevisionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRevision request. +func (r *DeleteRevisionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_GetAsset.go b/service/dataexchange/api_op_GetAsset.go new file mode 100644 index 00000000000..77885d308b0 --- /dev/null +++ b/service/dataexchange/api_op_GetAsset.go @@ -0,0 +1,253 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetAssetInput struct { + _ struct{} `type:"structure"` + + // AssetId is a required field + AssetId *string `location:"uri" locationName:"AssetId" type:"string" required:"true"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAssetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetAssetInput"} + + if s.AssetId == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetId")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
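Both delete operations mark their URI parameters as required, and the generated Validate method reports every missing field at once before any HTTP call is made. A small hedged sketch that wraps DeleteRevision; the package name and helper are illustrative only.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

// deleteRevision removes a single revision. Both URI parameters are required;
// Validate returns an aws.ErrInvalidParams listing any that are missing.
func deleteRevision(ctx context.Context, client *dataexchange.Client, dataSetID, revisionID string) error {
	input := &dataexchange.DeleteRevisionInput{
		DataSetId:  aws.String(dataSetID),
		RevisionId: aws.String(revisionID),
	}
	if err := input.Validate(); err != nil {
		return err // reported client-side, before the request is sent
	}

	_, err := client.DeleteRevisionRequest(input).Send(ctx)
	return err
}
```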
+func (s GetAssetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetAssetOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + AssetDetails *AssetDetails `type:"structure"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + AssetType AssetType `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A unique identifier. + DataSetId *string `type:"string"` + + // A unique identifier. + Id *string `type:"string"` + + // The name of the asset. When importing from Amazon S3, the S3 object key is + // used as the asset name. When exporting to Amazon S3, the asset name is used + // as default target S3 object key. + Name *string `type:"string"` + + // A unique identifier. + RevisionId *string `type:"string"` + + // A unique identifier. + SourceId *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetAssetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetAssetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssetDetails != nil { + v := s.AssetDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AssetDetails", v, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opGetAsset = "GetAsset" + +// GetAssetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation returns information about an asset. +// +// // Example sending a request using GetAssetRequest. +// req := client.GetAssetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/GetAsset +func (c *Client) GetAssetRequest(input *GetAssetInput) GetAssetRequest { + op := &aws.Operation{ + Name: opGetAsset, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + } + + if input == nil { + input = &GetAssetInput{} + } + + req := c.newRequest(op, input, &GetAssetOutput{}) + return GetAssetRequest{Request: req, Input: input, Copy: c.GetAssetRequest} +} + +// GetAssetRequest is the request type for the +// GetAsset API operation. +type GetAssetRequest struct { + *aws.Request + Input *GetAssetInput + Copy func(*GetAssetInput) GetAssetRequest +} + +// Send marshals and sends the GetAsset API request. 
+func (r GetAssetRequest) Send(ctx context.Context) (*GetAssetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAssetResponse{ + GetAssetOutput: r.Request.Data.(*GetAssetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAssetResponse is the response type for the +// GetAsset API operation. +type GetAssetResponse struct { + *GetAssetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAsset request. +func (r *GetAssetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_GetDataSet.go b/service/dataexchange/api_op_GetDataSet.go new file mode 100644 index 00000000000..85677adc781 --- /dev/null +++ b/service/dataexchange/api_op_GetDataSet.go @@ -0,0 +1,242 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetDataSetInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetDataSetInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetDataSetOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + AssetType AssetType `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description of a resource. + Description *string `type:"string"` + + // A unique identifier. + Id *string `type:"string"` + + // The name of the model. + Name *string `type:"string"` + + // A property that defines the data set as OWNED by the account (for providers) + // or ENTITLED to the account (for subscribers). When an owned data set is published + // in a product, AWS Data Exchange creates a copy of the data set. Subscribers + // can access that copy of the data set as an entitled data set. + Origin Origin `type:"string" enum:"true"` + + OriginDetails *OriginDetails `type:"structure"` + + // A unique identifier. 
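GetAsset is a plain GET against the asset's URI, and its output mirrors the asset's metadata. A hedged sketch of fetching an asset and reading a few of the optional fields shown above; the IDs are placeholders supplied by the caller.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func describeAsset(ctx context.Context, client *dataexchange.Client, dataSetID, revisionID, assetID string) error {
	resp, err := client.GetAssetRequest(&dataexchange.GetAssetInput{
		DataSetId:  aws.String(dataSetID),
		RevisionId: aws.String(revisionID),
		AssetId:    aws.String(assetID),
	}).Send(ctx)
	if err != nil {
		return err
	}

	// Output members are optional pointers or string-backed enums, so guard before dereferencing.
	if resp.Name != nil {
		fmt.Println("name:", *resp.Name)
	}
	fmt.Println("type:", string(resp.AssetType)) // currently S3_SNAPSHOT
	if resp.CreatedAt != nil {
		fmt.Println("created:", resp.CreatedAt.Format("2006-01-02"))
	}
	return nil
}
```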
+ SourceId *string `type:"string"` + + Tags map[string]string `type:"map"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Origin) > 0 { + v := s.Origin + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Origin", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.OriginDetails != nil { + v := s.OriginDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OriginDetails", v, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opGetDataSet = "GetDataSet" + +// GetDataSetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation returns information about a data set. +// +// // Example sending a request using GetDataSetRequest. 
+// req := client.GetDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/GetDataSet +func (c *Client) GetDataSetRequest(input *GetDataSetInput) GetDataSetRequest { + op := &aws.Operation{ + Name: opGetDataSet, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets/{DataSetId}", + } + + if input == nil { + input = &GetDataSetInput{} + } + + req := c.newRequest(op, input, &GetDataSetOutput{}) + return GetDataSetRequest{Request: req, Input: input, Copy: c.GetDataSetRequest} +} + +// GetDataSetRequest is the request type for the +// GetDataSet API operation. +type GetDataSetRequest struct { + *aws.Request + Input *GetDataSetInput + Copy func(*GetDataSetInput) GetDataSetRequest +} + +// Send marshals and sends the GetDataSet API request. +func (r GetDataSetRequest) Send(ctx context.Context) (*GetDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetDataSetResponse{ + GetDataSetOutput: r.Request.Data.(*GetDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetDataSetResponse is the response type for the +// GetDataSet API operation. +type GetDataSetResponse struct { + *GetDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDataSet request. +func (r *GetDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_GetJob.go b/service/dataexchange/api_op_GetJob.go new file mode 100644 index 00000000000..1908ae3aa62 --- /dev/null +++ b/service/dataexchange/api_op_GetJob.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetJobInput struct { + _ struct{} `type:"structure"` + + // JobId is a required field + JobId *string `location:"uri" locationName:"JobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetJobInput"} + + if s.JobId == nil { + invalidParams.Add(aws.NewErrParamRequired("JobId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetJobInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.JobId != nil { + v := *s.JobId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "JobId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetJobOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Details for the response. 
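GetDataSet returns the data set's Origin, which distinguishes data sets the account owns from those it is entitled to through a subscription. A sketch that branches on that value; the comparison uses the raw strings from the field documentation above rather than assuming the generated enum constant names.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

// isOwnedDataSet reports whether the data set is OWNED by the account
// (as opposed to ENTITLED via a subscription).
func isOwnedDataSet(ctx context.Context, client *dataexchange.Client, dataSetID string) (bool, error) {
	resp, err := client.GetDataSetRequest(&dataexchange.GetDataSetInput{
		DataSetId: aws.String(dataSetID),
	}).Send(ctx)
	if err != nil {
		return false, err
	}
	// Origin is a string-backed enum; "OWNED" and "ENTITLED" per the field docs above.
	return string(resp.Origin) == "OWNED", nil
}
```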
+ Details *ResponseDetails `type:"structure"` + + Errors []JobError `type:"list"` + + // A unique identifier. + Id *string `type:"string"` + + State State `type:"string" enum:"true"` + + Type Type `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetJobOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetJobOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Details != nil { + v := s.Details + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Details", v, metadata) + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.State) > 0 { + v := s.State + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opGetJob = "GetJob" + +// GetJobRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation returns information about a job. +// +// // Example sending a request using GetJobRequest. +// req := client.GetJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/GetJob +func (c *Client) GetJobRequest(input *GetJobInput) GetJobRequest { + op := &aws.Operation{ + Name: opGetJob, + HTTPMethod: "GET", + HTTPPath: "/v1/jobs/{JobId}", + } + + if input == nil { + input = &GetJobInput{} + } + + req := c.newRequest(op, input, &GetJobOutput{}) + return GetJobRequest{Request: req, Input: input, Copy: c.GetJobRequest} +} + +// GetJobRequest is the request type for the +// GetJob API operation. +type GetJobRequest struct { + *aws.Request + Input *GetJobInput + Copy func(*GetJobInput) GetJobRequest +} + +// Send marshals and sends the GetJob API request. 
+func (r GetJobRequest) Send(ctx context.Context) (*GetJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetJobResponse{ + GetJobOutput: r.Request.Data.(*GetJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetJobResponse is the response type for the +// GetJob API operation. +type GetJobResponse struct { + *GetJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetJob request. +func (r *GetJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_GetRevision.go b/service/dataexchange/api_op_GetRevision.go new file mode 100644 index 00000000000..c54737edfa0 --- /dev/null +++ b/service/dataexchange/api_op_GetRevision.go @@ -0,0 +1,232 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetRevisionInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRevisionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetRevisionInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetRevisionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetRevisionOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + Comment *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A unique identifier. + DataSetId *string `type:"string"` + + Finalized *bool `type:"boolean"` + + // A unique identifier. + Id *string `type:"string"` + + // A unique identifier. + SourceId *string `type:"string"` + + Tags map[string]string `type:"map"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. 
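Import and export jobs run asynchronously, so a caller typically starts a job and then polls GetJob until State reaches a terminal value. A rough polling sketch; the terminal state strings and the polling interval are assumptions, not taken from this change.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

// waitForJob polls GetJob until the job reaches an assumed terminal state.
func waitForJob(ctx context.Context, client *dataexchange.Client, jobID string) error {
	for {
		resp, err := client.GetJobRequest(&dataexchange.GetJobInput{
			JobId: aws.String(jobID),
		}).Send(ctx)
		if err != nil {
			return err
		}

		switch string(resp.State) { // terminal values assumed here
		case "COMPLETED":
			return nil
		case "ERROR", "CANCELLED":
			return fmt.Errorf("job %s ended in state %s: %v", jobID, resp.State, resp.Errors)
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second): // arbitrary polling interval
		}
	}
}
```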
+ UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetRevisionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Comment != nil { + v := *s.Comment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Finalized != nil { + v := *s.Finalized + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Finalized", protocol.BoolValue(v), metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opGetRevision = "GetRevision" + +// GetRevisionRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation returns information about a revision. +// +// // Example sending a request using GetRevisionRequest. +// req := client.GetRevisionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/GetRevision +func (c *Client) GetRevisionRequest(input *GetRevisionInput) GetRevisionRequest { + op := &aws.Operation{ + Name: opGetRevision, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + } + + if input == nil { + input = &GetRevisionInput{} + } + + req := c.newRequest(op, input, &GetRevisionOutput{}) + return GetRevisionRequest{Request: req, Input: input, Copy: c.GetRevisionRequest} +} + +// GetRevisionRequest is the request type for the +// GetRevision API operation. +type GetRevisionRequest struct { + *aws.Request + Input *GetRevisionInput + Copy func(*GetRevisionInput) GetRevisionRequest +} + +// Send marshals and sends the GetRevision API request. 
+func (r GetRevisionRequest) Send(ctx context.Context) (*GetRevisionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetRevisionResponse{ + GetRevisionOutput: r.Request.Data.(*GetRevisionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetRevisionResponse is the response type for the +// GetRevision API operation. +type GetRevisionResponse struct { + *GetRevisionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetRevision request. +func (r *GetRevisionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_ListDataSetRevisions.go b/service/dataexchange/api_op_ListDataSetRevisions.go new file mode 100644 index 00000000000..bac3df3af79 --- /dev/null +++ b/service/dataexchange/api_op_ListDataSetRevisions.go @@ -0,0 +1,229 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDataSetRevisionsInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDataSetRevisionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataSetRevisionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDataSetRevisionsInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDataSetRevisionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDataSetRevisionsOutput struct { + _ struct{} `type:"structure"` + + // The token value retrieved from a previous call to access the next page of + // results. 
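GetRevision exposes the revision's Finalized flag, the marker a provider sets once the revision's assets are complete. A small hedged helper built only from the shapes above.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func revisionIsFinalized(ctx context.Context, client *dataexchange.Client, dataSetID, revisionID string) (bool, error) {
	resp, err := client.GetRevisionRequest(&dataexchange.GetRevisionInput{
		DataSetId:  aws.String(dataSetID),
		RevisionId: aws.String(revisionID),
	}).Send(ctx)
	if err != nil {
		return false, err
	}
	// Finalized is a *bool and may be nil when the flag has never been set.
	return resp.Finalized != nil && *resp.Finalized, nil
}
```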
+ NextToken *string `type:"string"` + + Revisions []RevisionEntry `type:"list"` +} + +// String returns the string representation +func (s ListDataSetRevisionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDataSetRevisionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Revisions != nil { + v := s.Revisions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Revisions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListDataSetRevisions = "ListDataSetRevisions" + +// ListDataSetRevisionsRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation lists a data set's revisions sorted by CreatedAt in descending +// order. +// +// // Example sending a request using ListDataSetRevisionsRequest. +// req := client.ListDataSetRevisionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/ListDataSetRevisions +func (c *Client) ListDataSetRevisionsRequest(input *ListDataSetRevisionsInput) ListDataSetRevisionsRequest { + op := &aws.Operation{ + Name: opListDataSetRevisions, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataSetRevisionsInput{} + } + + req := c.newRequest(op, input, &ListDataSetRevisionsOutput{}) + return ListDataSetRevisionsRequest{Request: req, Input: input, Copy: c.ListDataSetRevisionsRequest} +} + +// ListDataSetRevisionsRequest is the request type for the +// ListDataSetRevisions API operation. +type ListDataSetRevisionsRequest struct { + *aws.Request + Input *ListDataSetRevisionsInput + Copy func(*ListDataSetRevisionsInput) ListDataSetRevisionsRequest +} + +// Send marshals and sends the ListDataSetRevisions API request. +func (r ListDataSetRevisionsRequest) Send(ctx context.Context) (*ListDataSetRevisionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDataSetRevisionsResponse{ + ListDataSetRevisionsOutput: r.Request.Data.(*ListDataSetRevisionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDataSetRevisionsRequestPaginator returns a paginator for ListDataSetRevisions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListDataSetRevisionsRequest(input) +// p := dataexchange.NewListDataSetRevisionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDataSetRevisionsPaginator(req ListDataSetRevisionsRequest) ListDataSetRevisionsPaginator { + return ListDataSetRevisionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDataSetRevisionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDataSetRevisionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDataSetRevisionsPaginator struct { + aws.Pager +} + +func (p *ListDataSetRevisionsPaginator) CurrentPage() *ListDataSetRevisionsOutput { + return p.Pager.CurrentPage().(*ListDataSetRevisionsOutput) +} + +// ListDataSetRevisionsResponse is the response type for the +// ListDataSetRevisions API operation. +type ListDataSetRevisionsResponse struct { + *ListDataSetRevisionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDataSetRevisions request. +func (r *ListDataSetRevisionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_ListDataSets.go b/service/dataexchange/api_op_ListDataSets.go new file mode 100644 index 00000000000..219daf2b727 --- /dev/null +++ b/service/dataexchange/api_op_ListDataSets.go @@ -0,0 +1,225 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDataSetsInput struct { + _ struct{} `type:"structure"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + Origin *string `location:"querystring" locationName:"origin" type:"string"` +} + +// String returns the string representation +func (s ListDataSetsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataSetsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDataSetsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
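The paginator generated for ListDataSetRevisions wraps the NextToken/MaxResults plumbing, so iterating every revision of a data set reduces to the Next/CurrentPage/Err loop. A sketch using the NewListDataSetRevisionsPaginator constructor defined above:

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func listAllRevisions(ctx context.Context, client *dataexchange.Client, dataSetID string) ([]dataexchange.RevisionEntry, error) {
	req := client.ListDataSetRevisionsRequest(&dataexchange.ListDataSetRevisionsInput{
		DataSetId:  aws.String(dataSetID),
		MaxResults: aws.Int64(25), // page size; must be >= 1 per Validate above
	})

	var revisions []dataexchange.RevisionEntry
	p := dataexchange.NewListDataSetRevisionsPaginator(req)
	for p.Next(ctx) {
		revisions = append(revisions, p.CurrentPage().Revisions...)
	}
	return revisions, p.Err()
}
```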
+func (s ListDataSetsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Origin != nil { + v := *s.Origin + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "origin", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDataSetsOutput struct { + _ struct{} `type:"structure"` + + DataSets []DataSetEntry `type:"list"` + + // The token value retrieved from a previous call to access the next page of + // results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDataSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDataSetsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSets != nil { + v := s.DataSets + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSets", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListDataSets = "ListDataSets" + +// ListDataSetsRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation lists your data sets. When listing by origin OWNED, results +// are sorted by CreatedAt in descending order. When listing by origin ENTITLED, +// there is no order and the maxResults parameter is ignored. +// +// // Example sending a request using ListDataSetsRequest. +// req := client.ListDataSetsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/ListDataSets +func (c *Client) ListDataSetsRequest(input *ListDataSetsInput) ListDataSetsRequest { + op := &aws.Operation{ + Name: opListDataSets, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataSetsInput{} + } + + req := c.newRequest(op, input, &ListDataSetsOutput{}) + return ListDataSetsRequest{Request: req, Input: input, Copy: c.ListDataSetsRequest} +} + +// ListDataSetsRequest is the request type for the +// ListDataSets API operation. +type ListDataSetsRequest struct { + *aws.Request + Input *ListDataSetsInput + Copy func(*ListDataSetsInput) ListDataSetsRequest +} + +// Send marshals and sends the ListDataSets API request. 
+func (r ListDataSetsRequest) Send(ctx context.Context) (*ListDataSetsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDataSetsResponse{ + ListDataSetsOutput: r.Request.Data.(*ListDataSetsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDataSetsRequestPaginator returns a paginator for ListDataSets. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDataSetsRequest(input) +// p := dataexchange.NewListDataSetsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDataSetsPaginator(req ListDataSetsRequest) ListDataSetsPaginator { + return ListDataSetsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDataSetsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDataSetsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDataSetsPaginator struct { + aws.Pager +} + +func (p *ListDataSetsPaginator) CurrentPage() *ListDataSetsOutput { + return p.Pager.CurrentPage().(*ListDataSetsOutput) +} + +// ListDataSetsResponse is the response type for the +// ListDataSets API operation. +type ListDataSetsResponse struct { + *ListDataSetsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDataSets request. +func (r *ListDataSetsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_ListJobs.go b/service/dataexchange/api_op_ListJobs.go new file mode 100644 index 00000000000..6f7752b83da --- /dev/null +++ b/service/dataexchange/api_op_ListJobs.go @@ -0,0 +1,231 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListJobsInput struct { + _ struct{} `type:"structure"` + + DataSetId *string `location:"querystring" locationName:"dataSetId" type:"string"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + RevisionId *string `location:"querystring" locationName:"revisionId" type:"string"` +} + +// String returns the string representation +func (s ListJobsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJobsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
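ListDataSets accepts an optional origin query parameter; per the operation notes above, OWNED listings come back sorted by CreatedAt in descending order, while ENTITLED listings are unordered and ignore maxResults. A hedged sketch of collecting owned data sets with the generated paginator:

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func listOwnedDataSets(ctx context.Context, client *dataexchange.Client) ([]dataexchange.DataSetEntry, error) {
	req := client.ListDataSetsRequest(&dataexchange.ListDataSetsInput{
		Origin: aws.String("OWNED"), // "ENTITLED" selects subscribed data sets instead
	})

	var dataSets []dataexchange.DataSetEntry
	p := dataexchange.NewListDataSetsPaginator(req)
	for p.Next(ctx) {
		dataSets = append(dataSets, p.CurrentPage().DataSets...)
	}
	return dataSets, p.Err()
}
```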
+func (s ListJobsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "dataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "revisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListJobsOutput struct { + _ struct{} `type:"structure"` + + Jobs []JobEntry `type:"list"` + + // The token value retrieved from a previous call to access the next page of + // results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListJobsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Jobs != nil { + v := s.Jobs + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Jobs", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListJobs = "ListJobs" + +// ListJobsRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation lists your jobs sorted by CreatedAt in descending order. +// +// // Example sending a request using ListJobsRequest. +// req := client.ListJobsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/ListJobs +func (c *Client) ListJobsRequest(input *ListJobsInput) ListJobsRequest { + op := &aws.Operation{ + Name: opListJobs, + HTTPMethod: "GET", + HTTPPath: "/v1/jobs", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsInput{} + } + + req := c.newRequest(op, input, &ListJobsOutput{}) + return ListJobsRequest{Request: req, Input: input, Copy: c.ListJobsRequest} +} + +// ListJobsRequest is the request type for the +// ListJobs API operation. +type ListJobsRequest struct { + *aws.Request + Input *ListJobsInput + Copy func(*ListJobsInput) ListJobsRequest +} + +// Send marshals and sends the ListJobs API request. 
+func (r ListJobsRequest) Send(ctx context.Context) (*ListJobsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListJobsResponse{ + ListJobsOutput: r.Request.Data.(*ListJobsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListJobsRequestPaginator returns a paginator for ListJobs. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListJobsRequest(input) +// p := dataexchange.NewListJobsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListJobsPaginator(req ListJobsRequest) ListJobsPaginator { + return ListJobsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListJobsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListJobsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListJobsPaginator struct { + aws.Pager +} + +func (p *ListJobsPaginator) CurrentPage() *ListJobsOutput { + return p.Pager.CurrentPage().(*ListJobsOutput) +} + +// ListJobsResponse is the response type for the +// ListJobs API operation. +type ListJobsResponse struct { + *ListJobsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListJobs request. +func (r *ListJobsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_ListRevisionAssets.go b/service/dataexchange/api_op_ListRevisionAssets.go new file mode 100644 index 00000000000..a48dcb36e06 --- /dev/null +++ b/service/dataexchange/api_op_ListRevisionAssets.go @@ -0,0 +1,242 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListRevisionAssetsInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRevisionAssetsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
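ListJobs takes its filters as query-string parameters rather than URI segments, so a caller can scope the listing to a single data set or revision. A brief sketch that prints one page of results; the JobEntry field names are assumed to mirror the GetJob output shown earlier.

```go
package dataexchangeexample // hypothetical example package, not part of this change

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dataexchange"
)

func printRecentJobs(ctx context.Context, client *dataexchange.Client, dataSetID string) error {
	resp, err := client.ListJobsRequest(&dataexchange.ListJobsInput{
		DataSetId:  aws.String(dataSetID), // optional; revisionId narrows this further
		MaxResults: aws.Int64(10),
	}).Send(ctx)
	if err != nil {
		return err
	}
	for _, job := range resp.Jobs {
		fmt.Printf("%s  %s/%s\n", aws.StringValue(job.Id), job.Type, job.State)
	}
	// resp.NextToken, when non-nil, can be passed back in to fetch the next page.
	return nil
}
```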
+func (s *ListRevisionAssetsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRevisionAssetsInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRevisionAssetsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListRevisionAssetsOutput struct { + _ struct{} `type:"structure"` + + Assets []AssetEntry `type:"list"` + + // The token value retrieved from a previous call to access the next page of + // results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListRevisionAssetsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListRevisionAssetsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Assets != nil { + v := s.Assets + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Assets", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListRevisionAssets = "ListRevisionAssets" + +// ListRevisionAssetsRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation lists a revision's assets sorted alphabetically in descending +// order. +// +// // Example sending a request using ListRevisionAssetsRequest. 
+// req := client.ListRevisionAssetsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/ListRevisionAssets +func (c *Client) ListRevisionAssetsRequest(input *ListRevisionAssetsInput) ListRevisionAssetsRequest { + op := &aws.Operation{ + Name: opListRevisionAssets, + HTTPMethod: "GET", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRevisionAssetsInput{} + } + + req := c.newRequest(op, input, &ListRevisionAssetsOutput{}) + return ListRevisionAssetsRequest{Request: req, Input: input, Copy: c.ListRevisionAssetsRequest} +} + +// ListRevisionAssetsRequest is the request type for the +// ListRevisionAssets API operation. +type ListRevisionAssetsRequest struct { + *aws.Request + Input *ListRevisionAssetsInput + Copy func(*ListRevisionAssetsInput) ListRevisionAssetsRequest +} + +// Send marshals and sends the ListRevisionAssets API request. +func (r ListRevisionAssetsRequest) Send(ctx context.Context) (*ListRevisionAssetsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRevisionAssetsResponse{ + ListRevisionAssetsOutput: r.Request.Data.(*ListRevisionAssetsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRevisionAssetsRequestPaginator returns a paginator for ListRevisionAssets. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRevisionAssetsRequest(input) +// p := dataexchange.NewListRevisionAssetsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRevisionAssetsPaginator(req ListRevisionAssetsRequest) ListRevisionAssetsPaginator { + return ListRevisionAssetsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRevisionAssetsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRevisionAssetsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListRevisionAssetsPaginator struct { + aws.Pager +} + +func (p *ListRevisionAssetsPaginator) CurrentPage() *ListRevisionAssetsOutput { + return p.Pager.CurrentPage().(*ListRevisionAssetsOutput) +} + +// ListRevisionAssetsResponse is the response type for the +// ListRevisionAssets API operation. +type ListRevisionAssetsResponse struct { + *ListRevisionAssetsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRevisionAssets request. 
+func (r *ListRevisionAssetsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_ListTagsForResource.go b/service/dataexchange/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..979ee87bd7b --- /dev/null +++ b/service/dataexchange/api_op_ListTagsForResource.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + Tags map[string]string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation lists the tags on the resource. +// +// // Example sending a request using ListTagsForResourceRequest. 
+// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. +func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. +func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_StartJob.go b/service/dataexchange/api_op_StartJob.go new file mode 100644 index 00000000000..d77ec597029 --- /dev/null +++ b/service/dataexchange/api_op_StartJob.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type StartJobInput struct { + _ struct{} `type:"structure"` + + // JobId is a required field + JobId *string `location:"uri" locationName:"JobId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "StartJobInput"} + + if s.JobId == nil { + invalidParams.Add(aws.NewErrParamRequired("JobId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
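// Illustrative sketch of the new ListTagsForResource operation above, assuming the same
// imports and configured *dataexchange.Client as the ListRevisionAssets sketch earlier;
// the ARN is a placeholder.
func printDataExchangeTags(ctx context.Context, client *dataexchange.Client) error {
	resp, err := client.ListTagsForResourceRequest(&dataexchange.ListTagsForResourceInput{
		ResourceArn: aws.String("arn:aws:dataexchange:us-east-1:111122223333:data-sets/example"), // placeholder ARN
	}).Send(ctx)
	if err != nil {
		return err
	}
	// Tags come back as a plain map[string]string.
	for key, value := range resp.Tags {
		fmt.Println(key, "=", value)
	}
	return nil
}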
+func (s StartJobInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.JobId != nil { + v := *s.JobId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "JobId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type StartJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StartJobOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s StartJobOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opStartJob = "StartJob" + +// StartJobRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation starts a job. +// +// // Example sending a request using StartJobRequest. +// req := client.StartJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/StartJob +func (c *Client) StartJobRequest(input *StartJobInput) StartJobRequest { + op := &aws.Operation{ + Name: opStartJob, + HTTPMethod: "PATCH", + HTTPPath: "/v1/jobs/{JobId}", + } + + if input == nil { + input = &StartJobInput{} + } + + req := c.newRequest(op, input, &StartJobOutput{}) + return StartJobRequest{Request: req, Input: input, Copy: c.StartJobRequest} +} + +// StartJobRequest is the request type for the +// StartJob API operation. +type StartJobRequest struct { + *aws.Request + Input *StartJobInput + Copy func(*StartJobInput) StartJobRequest +} + +// Send marshals and sends the StartJob API request. +func (r StartJobRequest) Send(ctx context.Context) (*StartJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &StartJobResponse{ + StartJobOutput: r.Request.Data.(*StartJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// StartJobResponse is the response type for the +// StartJob API operation. +type StartJobResponse struct { + *StartJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// StartJob request. +func (r *StartJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_TagResource.go b/service/dataexchange/api_op_TagResource.go new file mode 100644 index 00000000000..de17a526bf9 --- /dev/null +++ b/service/dataexchange/api_op_TagResource.go @@ -0,0 +1,154 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
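// Illustrative sketch of the new StartJob operation above (PATCH /v1/jobs/{JobId}),
// assuming the setup from the ListRevisionAssets sketch earlier; the job ID is supplied
// by the caller and would come from a previously created job.
func startDataExchangeJob(ctx context.Context, client *dataexchange.Client, jobID string) error {
	// StartJobOutput is empty, so only the error is interesting here.
	_, err := client.StartJobRequest(&dataexchange.StartJobInput{
		JobId: aws.String(jobID),
	}).Send(ctx)
	return err
}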
+ +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // Tags is a required field + Tags map[string]string `locationName:"tags" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation tags a resource. +// +// // Example sending a request using TagResourceRequest. +// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. 
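// Illustrative sketch of tagging a resource with the new TagResource operation above,
// following the same Request/Send pattern; assumes the setup from the ListRevisionAssets
// sketch earlier; the ARN and tag values are placeholders.
func tagDataSet(ctx context.Context, client *dataexchange.Client, arn string) error {
	// The response body is discarded (see the restjson handler swap in TagResourceRequest),
	// so only the error matters.
	_, err := client.TagResourceRequest(&dataexchange.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags: map[string]string{
			"team":  "analytics", // placeholder tag
			"stage": "prod",      // placeholder tag
		},
	}).Send(ctx)
	return err
}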
+func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. +func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_UntagResource.go b/service/dataexchange/api_op_UntagResource.go new file mode 100644 index 00000000000..bc7424557e6 --- /dev/null +++ b/service/dataexchange/api_op_UntagResource.go @@ -0,0 +1,154 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resource-arn" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resource-arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "tagKeys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation removes one or more tags from a resource. 
+// +// // Example sending a request using UntagResourceRequest. +// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resource-arn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + req.Handlers.Unmarshal.Remove(restjson.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. +func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_UpdateAsset.go b/service/dataexchange/api_op_UpdateAsset.go new file mode 100644 index 00000000000..29e2837e54c --- /dev/null +++ b/service/dataexchange/api_op_UpdateAsset.go @@ -0,0 +1,271 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// The request to update an asset. +type UpdateAssetInput struct { + _ struct{} `type:"structure"` + + // AssetId is a required field + AssetId *string `location:"uri" locationName:"AssetId" type:"string" required:"true"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The name of the asset. When importing from Amazon S3, the S3 object key is + // used as the asset name. When exporting to Amazon S3, the asset name is used + // as default target S3 object key. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
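// Illustrative sketch of the new UntagResource operation above; the tag keys are sent as
// the tagKeys query string. Assumes the setup from the ListRevisionAssets sketch earlier;
// the ARN and tag keys are placeholders.
func untagDataSet(ctx context.Context, client *dataexchange.Client, arn string) error {
	_, err := client.UntagResourceRequest(&dataexchange.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     []string{"team", "stage"}, // placeholder tag keys
	}).Send(ctx)
	return err
}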
+func (s *UpdateAssetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateAssetInput"} + + if s.AssetId == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetId")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateAssetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateAssetOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + AssetDetails *AssetDetails `type:"structure"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + AssetType AssetType `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A unique identifier. + DataSetId *string `type:"string"` + + // A unique identifier. + Id *string `type:"string"` + + // The name of the asset. When importing from Amazon S3, the S3 object key is + // used as the asset name. When exporting to Amazon S3, the asset name is used + // as default target S3 object key. + Name *string `type:"string"` + + // A unique identifier. + RevisionId *string `type:"string"` + + // A unique identifier. + SourceId *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s UpdateAssetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateAssetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssetDetails != nil { + v := s.AssetDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AssetDetails", v, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opUpdateAsset = "UpdateAsset" + +// UpdateAssetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation updates an asset. +// +// // Example sending a request using UpdateAssetRequest. +// req := client.UpdateAssetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/UpdateAsset +func (c *Client) UpdateAssetRequest(input *UpdateAssetInput) UpdateAssetRequest { + op := &aws.Operation{ + Name: opUpdateAsset, + HTTPMethod: "PATCH", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}/assets/{AssetId}", + } + + if input == nil { + input = &UpdateAssetInput{} + } + + req := c.newRequest(op, input, &UpdateAssetOutput{}) + return UpdateAssetRequest{Request: req, Input: input, Copy: c.UpdateAssetRequest} +} + +// UpdateAssetRequest is the request type for the +// UpdateAsset API operation. +type UpdateAssetRequest struct { + *aws.Request + Input *UpdateAssetInput + Copy func(*UpdateAssetInput) UpdateAssetRequest +} + +// Send marshals and sends the UpdateAsset API request. 
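// Illustrative sketch of the new UpdateAsset operation above: Name is the only body field,
// while the three IDs are URI parameters. Assumes the setup from the ListRevisionAssets
// sketch earlier; all values are placeholders.
func renameAsset(ctx context.Context, client *dataexchange.Client) error {
	resp, err := client.UpdateAssetRequest(&dataexchange.UpdateAssetInput{
		DataSetId:  aws.String("example-data-set-id"),  // placeholder
		RevisionId: aws.String("example-revision-id"),  // placeholder
		AssetId:    aws.String("example-asset-id"),     // placeholder
		Name:       aws.String("2019-11-20/prices.csv"),
	}).Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(resp) // prints the updated asset via awsutil.Prettify
	return nil
}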
+func (r UpdateAssetRequest) Send(ctx context.Context) (*UpdateAssetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateAssetResponse{ + UpdateAssetOutput: r.Request.Data.(*UpdateAssetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateAssetResponse is the response type for the +// UpdateAsset API operation. +type UpdateAssetResponse struct { + *UpdateAssetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateAsset request. +func (r *UpdateAssetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_UpdateDataSet.go b/service/dataexchange/api_op_UpdateDataSet.go new file mode 100644 index 00000000000..d90f1bc7304 --- /dev/null +++ b/service/dataexchange/api_op_UpdateDataSet.go @@ -0,0 +1,247 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// The request to update a data set. +type UpdateDataSetInput struct { + _ struct{} `type:"structure"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The description for the data set. + Description *string `type:"string"` + + // The name of the data set. + Name *string `type:"string"` +} + +// String returns the string representation +func (s UpdateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDataSetInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDataSetOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + AssetType AssetType `type:"string" enum:"true"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A description of a resource. 
+ Description *string `type:"string"` + + // A unique identifier. + Id *string `type:"string"` + + // The name of the model. + Name *string `type:"string"` + + // A property that defines the data set as OWNED by the account (for providers) + // or ENTITLED to the account (for subscribers). When an owned data set is published + // in a product, AWS Data Exchange creates a copy of the data set. Subscribers + // can access that copy of the data set as an entitled data set. + Origin Origin `type:"string" enum:"true"` + + OriginDetails *OriginDetails `type:"structure"` + + // A unique identifier. + SourceId *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s UpdateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Origin) > 0 { + v := s.Origin + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Origin", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.OriginDetails != nil { + v := s.OriginDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OriginDetails", v, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opUpdateDataSet = "UpdateDataSet" + +// UpdateDataSetRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation updates a data set. +// +// // Example sending a request using UpdateDataSetRequest. 
+// req := client.UpdateDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/UpdateDataSet +func (c *Client) UpdateDataSetRequest(input *UpdateDataSetInput) UpdateDataSetRequest { + op := &aws.Operation{ + Name: opUpdateDataSet, + HTTPMethod: "PATCH", + HTTPPath: "/v1/data-sets/{DataSetId}", + } + + if input == nil { + input = &UpdateDataSetInput{} + } + + req := c.newRequest(op, input, &UpdateDataSetOutput{}) + return UpdateDataSetRequest{Request: req, Input: input, Copy: c.UpdateDataSetRequest} +} + +// UpdateDataSetRequest is the request type for the +// UpdateDataSet API operation. +type UpdateDataSetRequest struct { + *aws.Request + Input *UpdateDataSetInput + Copy func(*UpdateDataSetInput) UpdateDataSetRequest +} + +// Send marshals and sends the UpdateDataSet API request. +func (r UpdateDataSetRequest) Send(ctx context.Context) (*UpdateDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDataSetResponse{ + UpdateDataSetOutput: r.Request.Data.(*UpdateDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDataSetResponse is the response type for the +// UpdateDataSet API operation. +type UpdateDataSetResponse struct { + *UpdateDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDataSet request. +func (r *UpdateDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_op_UpdateRevision.go b/service/dataexchange/api_op_UpdateRevision.go new file mode 100644 index 00000000000..2f614c0f529 --- /dev/null +++ b/service/dataexchange/api_op_UpdateRevision.go @@ -0,0 +1,239 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// The request to update a revision. +type UpdateRevisionInput struct { + _ struct{} `type:"structure"` + + // An optional comment about the revision. + Comment *string `type:"string"` + + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // Finalizing a revision tells AWS Data Exchange that your changes to the assets + // in the revision are complete. After it's in this read-only state, you can + // publish the revision to your products. + Finalized *bool `type:"boolean"` + + // RevisionId is a required field + RevisionId *string `location:"uri" locationName:"RevisionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRevisionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRevisionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRevisionInput"} + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
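// Illustrative sketch of the new UpdateDataSet operation above; Name and Description are
// optional body fields and DataSetId is a required URI parameter. Assumes the setup from
// the ListRevisionAssets sketch earlier; values are placeholders.
func updateDataSet(ctx context.Context, client *dataexchange.Client) error {
	resp, err := client.UpdateDataSetRequest(&dataexchange.UpdateDataSetInput{
		DataSetId:   aws.String("example-data-set-id"), // placeholder
		Name:        aws.String("Daily price snapshots"),
		Description: aws.String("Updated description"),
	}).Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(resp)
	return nil
}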
+func (s UpdateRevisionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Comment != nil { + v := *s.Comment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Finalized != nil { + v := *s.Finalized + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Finalized", protocol.BoolValue(v), metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateRevisionOutput struct { + _ struct{} `type:"structure"` + + // An Amazon Resource Name (ARN) that uniquely identifies an AWS resource. + Arn *string `type:"string"` + + Comment *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A unique identifier. + DataSetId *string `type:"string"` + + Finalized *bool `type:"boolean"` + + // A unique identifier. + Id *string `type:"string"` + + // A unique identifier. + SourceId *string `type:"string"` + + // Dates and times in AWS Data Exchange are recorded in ISO 8601 format. + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s UpdateRevisionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateRevisionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Comment != nil { + v := *s.Comment + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Finalized != nil { + v := *s.Finalized + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Finalized", protocol.BoolValue(v), metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +const opUpdateRevision = "UpdateRevision" + +// UpdateRevisionRequest returns a request value for making API operation for +// AWS Data Exchange. +// +// This operation updates a revision. +// +// // Example sending a request using UpdateRevisionRequest. +// req := client.UpdateRevisionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dataexchange-2017-07-25/UpdateRevision +func (c *Client) UpdateRevisionRequest(input *UpdateRevisionInput) UpdateRevisionRequest { + op := &aws.Operation{ + Name: opUpdateRevision, + HTTPMethod: "PATCH", + HTTPPath: "/v1/data-sets/{DataSetId}/revisions/{RevisionId}", + } + + if input == nil { + input = &UpdateRevisionInput{} + } + + req := c.newRequest(op, input, &UpdateRevisionOutput{}) + return UpdateRevisionRequest{Request: req, Input: input, Copy: c.UpdateRevisionRequest} +} + +// UpdateRevisionRequest is the request type for the +// UpdateRevision API operation. +type UpdateRevisionRequest struct { + *aws.Request + Input *UpdateRevisionInput + Copy func(*UpdateRevisionInput) UpdateRevisionRequest +} + +// Send marshals and sends the UpdateRevision API request. +func (r UpdateRevisionRequest) Send(ctx context.Context) (*UpdateRevisionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRevisionResponse{ + UpdateRevisionOutput: r.Request.Data.(*UpdateRevisionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRevisionResponse is the response type for the +// UpdateRevision API operation. +type UpdateRevisionResponse struct { + *UpdateRevisionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRevision request. 
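// Illustrative sketch of finalizing a revision with the new UpdateRevision operation above;
// per the Finalized field's documentation, this marks the revision read-only so it can be
// published. Assumes the setup from the ListRevisionAssets sketch earlier; IDs are placeholders.
func finalizeRevision(ctx context.Context, client *dataexchange.Client) error {
	resp, err := client.UpdateRevisionRequest(&dataexchange.UpdateRevisionInput{
		DataSetId:  aws.String("example-data-set-id"),  // placeholder
		RevisionId: aws.String("example-revision-id"),  // placeholder
		Comment:    aws.String("Initial import complete"),
		Finalized:  aws.Bool(true),
	}).Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(resp)
	return nil
}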
+func (r *UpdateRevisionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dataexchange/api_types.go b/service/dataexchange/api_types.go new file mode 100644 index 00000000000..3518ac4b795 --- /dev/null +++ b/service/dataexchange/api_types.go @@ -0,0 +1,1545 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dataexchange + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +var _ aws.Config +var _ = awsutil.Prettify + +// The destination for the asset. +type AssetDestinationEntry struct { + _ struct{} `type:"structure"` + + // The unique identifier for the asset. + // + // AssetId is a required field + AssetId *string `type:"string" required:"true"` + + // The S3 bucket that is the destination for the asset. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The name of the object in Amazon S3 for the asset. + Key *string `type:"string"` +} + +// String returns the string representation +func (s AssetDestinationEntry) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssetDestinationEntry) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AssetDestinationEntry"} + + if s.AssetId == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetId")) + } + + if s.Bucket == nil { + invalidParams.Add(aws.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AssetDestinationEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Bucket != nil { + v := *s.Bucket + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type AssetDetails struct { + _ struct{} `type:"structure"` + + // The S3 object that is the asset. + S3SnapshotAsset *S3SnapshotAsset `type:"structure"` +} + +// String returns the string representation +func (s AssetDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AssetDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.S3SnapshotAsset != nil { + v := s.S3SnapshotAsset + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "S3SnapshotAsset", v, metadata) + } + return nil +} + +// An asset in AWS Data Exchange is a piece of data that can be stored as an +// S3 object. The asset can be a structured data file, an image file, or some +// other data file. When you create an import job for your files, you create +// an asset in AWS Data Exchange for each of those files. +type AssetEntry struct { + _ struct{} `type:"structure"` + + // The ARN for the asset. 
+ // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // Information about the asset, including its size. + // + // AssetDetails is a required field + AssetDetails *AssetDetails `type:"structure" required:"true"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + // + // AssetType is a required field + AssetType AssetType `type:"string" required:"true" enum:"true"` + + // The date and time that the asset was created, in ISO 8601 format. + // + // CreatedAt is a required field + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The unique identifier for the data set associated with this asset. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the asset. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The name of the asset. When importing from Amazon S3, the S3 object key is + // used as the asset name. When exporting to Amazon S3, the asset name is used + // as default target S3 object key. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this asset. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` + + // The asset ID of the owned asset corresponding to the entitled asset being + // viewed. This parameter is returned when an asset owner is viewing the entitled + // copy of its owned asset. + SourceId *string `type:"string"` + + // The date and time that the asset was last updated, in ISO 8601 format. + // + // UpdatedAt is a required field + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s AssetEntry) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s AssetEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssetDetails != nil { + v := s.AssetDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AssetDetails", v, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +// The source of the assets. +type AssetSourceEntry struct { + _ struct{} `type:"structure"` + + // The S3 bucket that's part of the source of the asset. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The name of the object in Amazon S3 for the asset. + // + // Key is a required field + Key *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AssetSourceEntry) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssetSourceEntry) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AssetSourceEntry"} + + if s.Bucket == nil { + invalidParams.Add(aws.NewErrParamRequired("Bucket")) + } + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
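// AssetSourceEntry above requires both Bucket and Key; a small client-side check using its
// Validate method. The helper name is illustrative, and the bucket/key used in the usage
// note below are placeholders.
func validateSources(sources []dataexchange.AssetSourceEntry) error {
	for _, src := range sources {
		// Validate reports missing required fields before any request is built.
		if err := src.Validate(); err != nil {
			return err
		}
	}
	return nil
}

// e.g. validateSources([]dataexchange.AssetSourceEntry{{Bucket: aws.String("example-bucket"), Key: aws.String("prices/2019-11-20.csv")}})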
+func (s AssetSourceEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.Bucket != nil { + v := *s.Bucket + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A data set is an AWS resource with one or more revisions. +type DataSetEntry struct { + _ struct{} `type:"structure"` + + // The ARN for the data set. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The type of file your data is stored in. Currently, the supported asset type + // is S3_SNAPSHOT. + // + // AssetType is a required field + AssetType AssetType `type:"string" required:"true" enum:"true"` + + // The date and time that the data set was created, in ISO 8601 format. + // + // CreatedAt is a required field + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The description for the data set. + // + // Description is a required field + Description *string `type:"string" required:"true"` + + // The unique identifier for the data set. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The name of the data set. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A property that defines the data set as OWNED by the account (for providers) + // or ENTITLED to the account (for subscribers). + // + // Origin is a required field + Origin Origin `type:"string" required:"true" enum:"true"` + + // If the origin of this data set is ENTITLED, includes the details for the + // product on AWS Marketplace. + OriginDetails *OriginDetails `type:"structure"` + + // The data set ID of the owned data set corresponding to the entitled data + // set being viewed. This parameter is returned when a data set owner is viewing + // the entitled copy of its owned data set. + SourceId *string `type:"string"` + + // The date and time that the data set was last updated, in ISO 8601 format. + // + // UpdatedAt is a required field + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s DataSetEntry) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DataSetEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssetType) > 0 { + v := s.AssetType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Origin) > 0 { + v := s.Origin + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Origin", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.OriginDetails != nil { + v := s.OriginDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OriginDetails", v, metadata) + } + if s.SourceId != nil { + v := *s.SourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +type Details struct { + _ struct{} `type:"structure"` + + ImportAssetFromSignedUrlJobErrorDetails *ImportAssetFromSignedUrlJobErrorDetails `type:"structure"` + + // The list of sources for the assets. + ImportAssetsFromS3JobErrorDetails []AssetSourceEntry `type:"list"` +} + +// String returns the string representation +func (s Details) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Details) MarshalFields(e protocol.FieldEncoder) error { + if s.ImportAssetFromSignedUrlJobErrorDetails != nil { + v := s.ImportAssetFromSignedUrlJobErrorDetails + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ImportAssetFromSignedUrlJobErrorDetails", v, metadata) + } + if s.ImportAssetsFromS3JobErrorDetails != nil { + v := s.ImportAssetsFromS3JobErrorDetails + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ImportAssetsFromS3JobErrorDetails", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// Details of the operation to be performed by the job. +type ExportAssetToSignedUrlRequestDetails struct { + _ struct{} `type:"structure"` + + // The unique identifier for the asset that is exported to a signed URL. + // + // AssetId is a required field + AssetId *string `type:"string" required:"true"` + + // The unique identifier for the data set associated with this export job. 
+ // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this export request. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportAssetToSignedUrlRequestDetails) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportAssetToSignedUrlRequestDetails) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ExportAssetToSignedUrlRequestDetails"} + + if s.AssetId == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetId")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ExportAssetToSignedUrlRequestDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The details of the export to signed URL response. +type ExportAssetToSignedUrlResponseDetails struct { + _ struct{} `type:"structure"` + + // The unique identifier for the asset associated with this export job. + // + // AssetId is a required field + AssetId *string `type:"string" required:"true"` + + // The unique identifier for the data set associated with this export job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this export response. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` + + // The signed URL for the export request. + SignedUrl *string `type:"string"` + + // The date and time that the signed URL expires, in ISO 8601 format. + SignedUrlExpiresAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ExportAssetToSignedUrlResponseDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ExportAssetToSignedUrlResponseDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetId != nil { + v := *s.AssetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignedUrl != nil { + v := *s.SignedUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignedUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignedUrlExpiresAt != nil { + v := *s.SignedUrlExpiresAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignedUrlExpiresAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +// Details of the operation to be performed by the job. +type ExportAssetsToS3RequestDetails struct { + _ struct{} `type:"structure"` + + // The destination for the asset. + // + // AssetDestinations is a required field + AssetDestinations []AssetDestinationEntry `type:"list" required:"true"` + + // The unique identifier for the data set associated with this export job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this export request. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportAssetsToS3RequestDetails) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportAssetsToS3RequestDetails) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ExportAssetsToS3RequestDetails"} + + if s.AssetDestinations == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetDestinations")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + if s.AssetDestinations != nil { + for i, v := range s.AssetDestinations { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AssetDestinations", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ExportAssetsToS3RequestDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetDestinations != nil { + v := s.AssetDestinations + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "AssetDestinations", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Details about the export to Amazon S3 response. +type ExportAssetsToS3ResponseDetails struct { + _ struct{} `type:"structure"` + + // The destination in Amazon S3 where the asset is exported. + // + // AssetDestinations is a required field + AssetDestinations []AssetDestinationEntry `type:"list" required:"true"` + + // The unique identifier for the data set associated with this export job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this export response. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExportAssetsToS3ResponseDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ExportAssetsToS3ResponseDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetDestinations != nil { + v := s.AssetDestinations + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "AssetDestinations", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ImportAssetFromSignedUrlJobErrorDetails struct { + _ struct{} `type:"structure"` + + // The name of the asset. When importing from Amazon S3, the S3 object key is + // used as the asset name. When exporting to Amazon S3, the asset name is used + // as default target S3 object key. + // + // AssetName is a required field + AssetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportAssetFromSignedUrlJobErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ImportAssetFromSignedUrlJobErrorDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetName != nil { + v := *s.AssetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Details of the operation to be performed by the job. +type ImportAssetFromSignedUrlRequestDetails struct { + _ struct{} `type:"structure"` + + // The name of the asset. 
When importing from Amazon S3, the S3 object key is + // used as the asset name. + // + // AssetName is a required field + AssetName *string `type:"string" required:"true"` + + // The unique identifier for the data set associated with this import job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The Base64-encoded Md5 hash for the asset, used to ensure the integrity of + // the file at that location. + // + // Md5Hash is a required field + Md5Hash *string `min:"24" type:"string" required:"true"` + + // The unique identifier for the revision associated with this import request. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportAssetFromSignedUrlRequestDetails) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportAssetFromSignedUrlRequestDetails) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ImportAssetFromSignedUrlRequestDetails"} + + if s.AssetName == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetName")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.Md5Hash == nil { + invalidParams.Add(aws.NewErrParamRequired("Md5Hash")) + } + if s.Md5Hash != nil && len(*s.Md5Hash) < 24 { + invalidParams.Add(aws.NewErrParamMinLen("Md5Hash", 24)) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ImportAssetFromSignedUrlRequestDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetName != nil { + v := *s.AssetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Md5Hash != nil { + v := *s.Md5Hash + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Md5Hash", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The details in the response for an import request, including the signed URL +// and other information. +type ImportAssetFromSignedUrlResponseDetails struct { + _ struct{} `type:"structure"` + + // The name for the asset associated with this import response. + // + // AssetName is a required field + AssetName *string `type:"string" required:"true"` + + // The unique identifier for the data set associated with this import job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The Base64-encoded Md5 hash for the asset, used to ensure the integrity of + // the file at that location. + Md5Hash *string `min:"24" type:"string"` + + // The unique identifier for the revision associated with this import response. 
+ // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` + + // The signed URL. + SignedUrl *string `type:"string"` + + // The time and date at which the signed URL expires, in ISO 8601 format. + SignedUrlExpiresAt *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ImportAssetFromSignedUrlResponseDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ImportAssetFromSignedUrlResponseDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetName != nil { + v := *s.AssetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Md5Hash != nil { + v := *s.Md5Hash + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Md5Hash", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignedUrl != nil { + v := *s.SignedUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignedUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SignedUrlExpiresAt != nil { + v := *s.SignedUrlExpiresAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SignedUrlExpiresAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +// Details of the operation to be performed by the job. +type ImportAssetsFromS3RequestDetails struct { + _ struct{} `type:"structure"` + + // Is a list of S3 bucket and object key pairs. + // + // AssetSources is a required field + AssetSources []AssetSourceEntry `type:"list" required:"true"` + + // The unique identifier for the data set associated with this import job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this import request. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportAssetsFromS3RequestDetails) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ImportAssetsFromS3RequestDetails) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ImportAssetsFromS3RequestDetails"} + + if s.AssetSources == nil { + invalidParams.Add(aws.NewErrParamRequired("AssetSources")) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.RevisionId == nil { + invalidParams.Add(aws.NewErrParamRequired("RevisionId")) + } + if s.AssetSources != nil { + for i, v := range s.AssetSources { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AssetSources", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ImportAssetsFromS3RequestDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetSources != nil { + v := s.AssetSources + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "AssetSources", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Details from an import from Amazon S3 response. +type ImportAssetsFromS3ResponseDetails struct { + _ struct{} `type:"structure"` + + // Is a list of Amazon S3 bucket and object key pairs. + // + // AssetSources is a required field + AssetSources []AssetSourceEntry `type:"list" required:"true"` + + // The unique identifier for the data set associated with this import job. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // The unique identifier for the revision associated with this import response. + // + // RevisionId is a required field + RevisionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportAssetsFromS3ResponseDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ImportAssetsFromS3ResponseDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.AssetSources != nil { + v := s.AssetSources + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "AssetSources", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RevisionId != nil { + v := *s.RevisionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RevisionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// AWS Data Exchange Jobs are asynchronous import or export operations used +// to create or copy assets. A data set owner can both import and export as +// they see fit. Someone with an entitlement to a data set can only export. +// Jobs are deleted 90 days after they are created. 
+type JobEntry struct { + _ struct{} `type:"structure"` + + // The ARN for the job. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The date and time that the job was created, in ISO 8601 format. + // + // CreatedAt is a required field + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Details of the operation to be performed by the job, such as export destination + // details or import source details. + // + // Details is a required field + Details *ResponseDetails `type:"structure" required:"true"` + + // Errors for jobs. + Errors []JobError `type:"list"` + + // The unique identifier for the job. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The state of the job. + // + // State is a required field + State State `type:"string" required:"true" enum:"true"` + + // The job type. + // + // Type is a required field + Type Type `type:"string" required:"true" enum:"true"` + + // The date and time that the job was last updated, in ISO 8601 format. + // + // UpdatedAt is a required field + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s JobEntry) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s JobEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + if s.Details != nil { + v := s.Details + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Details", v, metadata) + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Id != nil { + v := *s.Id + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.State) > 0 { + v := s.State + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.UpdatedAt != nil { + v := *s.UpdatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdatedAt", + protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata) + } + return nil +} + +// An error that occurred with the job request. +type JobError struct { + _ struct{} `type:"structure"` + + // The code for the job error. + // + // Code is a required field + Code Code `type:"string" required:"true" enum:"true"` + + Details *Details `type:"structure"` + + // The name of the limit that was reached. + LimitName JobErrorLimitName `type:"string" enum:"true"` + + // The value of the exceeded limit. + LimitValue *float64 `type:"double"` + + // The message related to the job error. 
+    //
+    // Message is a required field
+    Message *string `type:"string" required:"true"`
+
+    // The unique identifier for the resource related to the error.
+    ResourceId *string `type:"string"`
+
+    // The type of resource related to the error.
+    ResourceType JobErrorResourceTypes `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s JobError) String() string {
+    return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s JobError) MarshalFields(e protocol.FieldEncoder) error {
+    if len(s.Code) > 0 {
+        v := s.Code
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Code", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+    }
+    if s.Details != nil {
+        v := s.Details
+
+        metadata := protocol.Metadata{}
+        e.SetFields(protocol.BodyTarget, "Details", v, metadata)
+    }
+    if len(s.LimitName) > 0 {
+        v := s.LimitName
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "LimitName", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+    }
+    if s.LimitValue != nil {
+        v := *s.LimitValue
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "LimitValue", protocol.Float64Value(v), metadata)
+    }
+    if s.Message != nil {
+        v := *s.Message
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.ResourceId != nil {
+        v := *s.ResourceId
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "ResourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if len(s.ResourceType) > 0 {
+        v := s.ResourceType
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "ResourceType", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+    }
+    return nil
+}
+
+type OriginDetails struct {
+    _ struct{} `type:"structure"`
+
+    // ProductId is a required field
+    ProductId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s OriginDetails) String() string {
+    return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s OriginDetails) MarshalFields(e protocol.FieldEncoder) error {
+    if s.ProductId != nil {
+        v := *s.ProductId
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "ProductId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    return nil
+}
+
+// The details for the request.
+type RequestDetails struct {
+    _ struct{} `type:"structure"`
+
+    // Details about the export to signed URL request.
+    ExportAssetToSignedUrl *ExportAssetToSignedUrlRequestDetails `type:"structure"`
+
+    // Details about the export to Amazon S3 request.
+    ExportAssetsToS3 *ExportAssetsToS3RequestDetails `type:"structure"`
+
+    // Details about the import from signed URL request.
+    ImportAssetFromSignedUrl *ImportAssetFromSignedUrlRequestDetails `type:"structure"`
+
+    // Details about the import from Amazon S3 request.
+    ImportAssetsFromS3 *ImportAssetsFromS3RequestDetails `type:"structure"`
+}
+
+// String returns the string representation
+func (s RequestDetails) String() string {
+    return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestDetails) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RequestDetails"} + if s.ExportAssetToSignedUrl != nil { + if err := s.ExportAssetToSignedUrl.Validate(); err != nil { + invalidParams.AddNested("ExportAssetToSignedUrl", err.(aws.ErrInvalidParams)) + } + } + if s.ExportAssetsToS3 != nil { + if err := s.ExportAssetsToS3.Validate(); err != nil { + invalidParams.AddNested("ExportAssetsToS3", err.(aws.ErrInvalidParams)) + } + } + if s.ImportAssetFromSignedUrl != nil { + if err := s.ImportAssetFromSignedUrl.Validate(); err != nil { + invalidParams.AddNested("ImportAssetFromSignedUrl", err.(aws.ErrInvalidParams)) + } + } + if s.ImportAssetsFromS3 != nil { + if err := s.ImportAssetsFromS3.Validate(); err != nil { + invalidParams.AddNested("ImportAssetsFromS3", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RequestDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.ExportAssetToSignedUrl != nil { + v := s.ExportAssetToSignedUrl + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExportAssetToSignedUrl", v, metadata) + } + if s.ExportAssetsToS3 != nil { + v := s.ExportAssetsToS3 + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExportAssetsToS3", v, metadata) + } + if s.ImportAssetFromSignedUrl != nil { + v := s.ImportAssetFromSignedUrl + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ImportAssetFromSignedUrl", v, metadata) + } + if s.ImportAssetsFromS3 != nil { + v := s.ImportAssetsFromS3 + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ImportAssetsFromS3", v, metadata) + } + return nil +} + +// Details for the response. +type ResponseDetails struct { + _ struct{} `type:"structure"` + + // Details for the export to signed URL response. + ExportAssetToSignedUrl *ExportAssetToSignedUrlResponseDetails `type:"structure"` + + // Details for the export to Amazon S3 response. + ExportAssetsToS3 *ExportAssetsToS3ResponseDetails `type:"structure"` + + // Details for the import from signed URL response. + ImportAssetFromSignedUrl *ImportAssetFromSignedUrlResponseDetails `type:"structure"` + + // Details for the import from Amazon S3 response. + ImportAssetsFromS3 *ImportAssetsFromS3ResponseDetails `type:"structure"` +} + +// String returns the string representation +func (s ResponseDetails) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ResponseDetails) MarshalFields(e protocol.FieldEncoder) error { + if s.ExportAssetToSignedUrl != nil { + v := s.ExportAssetToSignedUrl + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExportAssetToSignedUrl", v, metadata) + } + if s.ExportAssetsToS3 != nil { + v := s.ExportAssetsToS3 + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExportAssetsToS3", v, metadata) + } + if s.ImportAssetFromSignedUrl != nil { + v := s.ImportAssetFromSignedUrl + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ImportAssetFromSignedUrl", v, metadata) + } + if s.ImportAssetsFromS3 != nil { + v := s.ImportAssetsFromS3 + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ImportAssetsFromS3", v, metadata) + } + return nil +} + +// A revision is a container for one or more assets. 
+type RevisionEntry struct { + _ struct{} `type:"structure"` + + // The ARN for the revision. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // An optional comment about the revision. + Comment *string `type:"string"` + + // The date and time that the revision was created, in ISO 8601 format. + // + // CreatedAt is a required field + CreatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The unique identifier for the data set associated with this revision. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // To publish a revision to a data set in a product, the revision must first + // be finalized. Finalizing a revision tells AWS Data Exchange that your changes + // to the assets in the revision are complete. After it's in this read-only + // state, you can publish the revision to your products. + // + // Finalized revisions can be published through the AWS Data Exchange console + // or the AWS Marketplace Catalog API, using the StartChangeSet AWS Marketplace + // Catalog API action. When using the API, revisions are uniquely identified + // by their ARN. + Finalized *bool `type:"boolean"` + + // The unique identifier for the revision. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The revision ID of the owned revision corresponding to the entitled revision + // being viewed. This parameter is returned when a revision owner is viewing + // the entitled copy of its owned revision. + SourceId *string `type:"string"` + + // The date and time that the revision was last updated, in ISO 8601 format. + // + // UpdatedAt is a required field + UpdatedAt *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s RevisionEntry) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s RevisionEntry) MarshalFields(e protocol.FieldEncoder) error {
+    if s.Arn != nil {
+        v := *s.Arn
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.Comment != nil {
+        v := *s.Comment
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Comment", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.CreatedAt != nil {
+        v := *s.CreatedAt
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "CreatedAt",
+            protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata)
+    }
+    if s.DataSetId != nil {
+        v := *s.DataSetId
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.Finalized != nil {
+        v := *s.Finalized
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Finalized", protocol.BoolValue(v), metadata)
+    }
+    if s.Id != nil {
+        v := *s.Id
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Id", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.SourceId != nil {
+        v := *s.SourceId
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "SourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+    }
+    if s.UpdatedAt != nil {
+        v := *s.UpdatedAt
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "UpdatedAt",
+            protocol.TimeValue{V: v, Format: "iso8601", QuotedFormatTime: true}, metadata)
+    }
+    return nil
+}
+
+// The S3 object that is the asset.
+type S3SnapshotAsset struct {
+    _ struct{} `type:"structure"`
+
+    // The size of the S3 object that is the asset.
+    //
+    // Size is a required field
+    Size *float64 `type:"double" required:"true"`
+}
+
+// String returns the string representation
+func (s S3SnapshotAsset) String() string {
+    return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s S3SnapshotAsset) MarshalFields(e protocol.FieldEncoder) error {
+    if s.Size != nil {
+        v := *s.Size
+
+        metadata := protocol.Metadata{}
+        e.SetValue(protocol.BodyTarget, "Size", protocol.Float64Value(v), metadata)
+    }
+    return nil
+}
diff --git a/service/dataexchange/dataexchangeiface/interface.go b/service/dataexchange/dataexchangeiface/interface.go
new file mode 100644
index 00000000000..141ed308a95
--- /dev/null
+++ b/service/dataexchange/dataexchangeiface/interface.go
@@ -0,0 +1,109 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dataexchangeiface provides an interface to enable mocking the AWS Data Exchange service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package dataexchangeiface
+
+import (
+    "github.com/aws/aws-sdk-go-v2/service/dataexchange"
+)
+
+// ClientAPI provides an interface to enable mocking the
+// dataexchange.Client methods. This makes unit testing your code that
+// calls out to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Data Exchange.
+//    func myFunc(svc dataexchangeiface.ClientAPI) bool {
+//        // Make svc.CancelJob request
+//    }
+//
+//    func main() {
+//        cfg, err := external.LoadDefaultAWSConfig()
+//        if err != nil {
+//            panic("failed to load config, " + err.Error())
+//        }
+//
+//        svc := dataexchange.New(cfg)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockClientClient struct {
+//        dataexchangeiface.ClientAPI
+//    }
+//    func (m *mockClientClient) CancelJob(input *dataexchange.CancelJobInput) (*dataexchange.CancelJobOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockClientClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or using
+// tooling to generate mocks to satisfy the interfaces.
+type ClientAPI interface {
+    CancelJobRequest(*dataexchange.CancelJobInput) dataexchange.CancelJobRequest
+
+    CreateDataSetRequest(*dataexchange.CreateDataSetInput) dataexchange.CreateDataSetRequest
+
+    CreateJobRequest(*dataexchange.CreateJobInput) dataexchange.CreateJobRequest
+
+    CreateRevisionRequest(*dataexchange.CreateRevisionInput) dataexchange.CreateRevisionRequest
+
+    DeleteAssetRequest(*dataexchange.DeleteAssetInput) dataexchange.DeleteAssetRequest
+
+    DeleteDataSetRequest(*dataexchange.DeleteDataSetInput) dataexchange.DeleteDataSetRequest
+
+    DeleteRevisionRequest(*dataexchange.DeleteRevisionInput) dataexchange.DeleteRevisionRequest
+
+    GetAssetRequest(*dataexchange.GetAssetInput) dataexchange.GetAssetRequest
+
+    GetDataSetRequest(*dataexchange.GetDataSetInput) dataexchange.GetDataSetRequest
+
+    GetJobRequest(*dataexchange.GetJobInput) dataexchange.GetJobRequest
+
+    GetRevisionRequest(*dataexchange.GetRevisionInput) dataexchange.GetRevisionRequest
+
+    ListDataSetRevisionsRequest(*dataexchange.ListDataSetRevisionsInput) dataexchange.ListDataSetRevisionsRequest
+
+    ListDataSetsRequest(*dataexchange.ListDataSetsInput) dataexchange.ListDataSetsRequest
+
+    ListJobsRequest(*dataexchange.ListJobsInput) dataexchange.ListJobsRequest
+
+    ListRevisionAssetsRequest(*dataexchange.ListRevisionAssetsInput) dataexchange.ListRevisionAssetsRequest
+
+    ListTagsForResourceRequest(*dataexchange.ListTagsForResourceInput) dataexchange.ListTagsForResourceRequest
+
+    StartJobRequest(*dataexchange.StartJobInput) dataexchange.StartJobRequest
+
+    TagResourceRequest(*dataexchange.TagResourceInput) dataexchange.TagResourceRequest
+
+    UntagResourceRequest(*dataexchange.UntagResourceInput) dataexchange.UntagResourceRequest
+
+    UpdateAssetRequest(*dataexchange.UpdateAssetInput) dataexchange.UpdateAssetRequest
+
+    UpdateDataSetRequest(*dataexchange.UpdateDataSetInput) dataexchange.UpdateDataSetRequest
+
+    UpdateRevisionRequest(*dataexchange.UpdateRevisionInput) dataexchange.UpdateRevisionRequest
+}
+
+var _ ClientAPI = (*dataexchange.Client)(nil)
diff --git a/service/datasync/api_enums.go b/service/datasync/api_enums.go
index 184f6c11550..072656bd398 100644
--- a/service/datasync/api_enums.go
+++ b/service/datasync/api_enums.go
@@ -42,6 +42,7 @@ type EndpointType string
 const (
 	EndpointTypePublic EndpointType = "PUBLIC"
 	EndpointTypePrivateLink
EndpointType = "PRIVATE_LINK" + EndpointTypeFips EndpointType = "FIPS" ) func (enum EndpointType) MarshalValue() (string, error) { @@ -163,9 +164,8 @@ type PosixPermissions string // Enum values for PosixPermissions const ( - PosixPermissionsNone PosixPermissions = "NONE" - PosixPermissionsBestEffort PosixPermissions = "BEST_EFFORT" - PosixPermissionsPreserve PosixPermissions = "PRESERVE" + PosixPermissionsNone PosixPermissions = "NONE" + PosixPermissionsPreserve PosixPermissions = "PRESERVE" ) func (enum PosixPermissions) MarshalValue() (string, error) { diff --git a/service/datasync/api_op_CreateLocationEfs.go b/service/datasync/api_op_CreateLocationEfs.go index dc2696b9d81..d8a49edb497 100644 --- a/service/datasync/api_op_CreateLocationEfs.go +++ b/service/datasync/api_op_CreateLocationEfs.go @@ -45,6 +45,8 @@ type CreateLocationEfsInput struct { // A subdirectory in the location’s path. This subdirectory in the EFS file // system is used to read data from the EFS source location or write data to // the EFS destination. By default, AWS DataSync uses the root directory. + // + // Subdirectory must be specified with forward slashes. For example /path/to/folder. Subdirectory *string `type:"string"` // The key-value pair that represents a tag that you want to add to the resource. diff --git a/service/datasync/api_op_CreateLocationSmb.go b/service/datasync/api_op_CreateLocationSmb.go index a74763269ab..91fc842b385 100644 --- a/service/datasync/api_op_CreateLocationSmb.go +++ b/service/datasync/api_op_CreateLocationSmb.go @@ -30,7 +30,7 @@ type CreateLocationSmbInput struct { // access files and folders in the SMB share. // // Password is a required field - Password *string `type:"string" required:"true"` + Password *string `type:"string" required:"true" sensitive:"true"` // The name of the SMB server. This value is the IP address or Domain Name Service // (DNS) name of the SMB server. An agent that is installed on-premises uses @@ -48,6 +48,8 @@ type CreateLocationSmbInput struct { // The path should be such that it can be mounted by other SMB clients in your // network. // + // Subdirectory must be specified with forward slashes. For example /path/to/folder. + // // To transfer all the data in the folder you specified, DataSync needs to have // permissions to mount the SMB share, as well as to access all the data in // that share. To ensure this, either ensure that the user/password specified @@ -136,7 +138,7 @@ const opCreateLocationSmb = "CreateLocationSmb" // AWS DataSync. // // Defines a file system on an Server Message Block (SMB) server that can be -// read from or written to +// read from or written to. // // // Example sending a request using CreateLocationSmbRequest. // req := client.CreateLocationSmbRequest(params) diff --git a/service/datasync/api_op_CreateTask.go b/service/datasync/api_op_CreateTask.go index 0b455971b45..9fd1e6f3113 100644 --- a/service/datasync/api_op_CreateTask.go +++ b/service/datasync/api_op_CreateTask.go @@ -49,6 +49,11 @@ type CreateTaskInput struct { // see the operation. Options *Options `type:"structure"` + // Specifies a schedule used to periodically transfer files from a source to + // a destination location. The schedule should be specified in UTC time. For + // more information, see task-scheduling. + Schedule *TaskSchedule `type:"structure"` + // The Amazon Resource Name (ARN) of the source location for the task. 
// // SourceLocationArn is a required field @@ -83,6 +88,11 @@ func (s *CreateTaskInput) Validate() error { invalidParams.AddNested("Options", err.(aws.ErrInvalidParams)) } } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(aws.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if err := v.Validate(); err != nil { diff --git a/service/datasync/api_op_DescribeTask.go b/service/datasync/api_op_DescribeTask.go index 9e62adfb807..4a2839fd0a1 100644 --- a/service/datasync/api_op_DescribeTask.go +++ b/service/datasync/api_op_DescribeTask.go @@ -89,6 +89,10 @@ type DescribeTaskOutput struct { // the overriding OverrideOptions value to operation. Options *Options `type:"structure"` + // The schedule used to periodically transfer files from a source to a destination + // location. + Schedule *TaskSchedule `type:"structure"` + // The Amazon Resource Name (ARN) of the source file system's location. SourceLocationArn *string `type:"string"` diff --git a/service/datasync/api_op_UpdateTask.go b/service/datasync/api_op_UpdateTask.go index 71af0823b3d..7da79249a8a 100644 --- a/service/datasync/api_op_UpdateTask.go +++ b/service/datasync/api_op_UpdateTask.go @@ -36,6 +36,13 @@ type UpdateTaskInput struct { // value to StartTaskExecution. Options *Options `type:"structure"` + // Specifies a schedule used to periodically transfer files from a source to + // a destination location. You can configure your task to execute hourly, daily, + // weekly or on specific days of the week. You control when in the day or hour + // you want the task to execute. The time you specify is UTC time. For more + // information, see task-scheduling. + Schedule *TaskSchedule `type:"structure"` + // The Amazon Resource Name (ARN) of the resource name of the task to update. // // TaskArn is a required field @@ -62,6 +69,11 @@ func (s *UpdateTaskInput) Validate() error { invalidParams.AddNested("Options", err.(aws.ErrInvalidParams)) } } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/datasync/api_types.go b/service/datasync/api_types.go index ba39a90f88e..28e1efca3be 100644 --- a/service/datasync/api_types.go +++ b/service/datasync/api_types.go @@ -302,10 +302,10 @@ type Options struct { PreserveDevices PreserveDevices `type:"string" enum:"true"` // A value that determines whether tasks should be queued before executing the - // tasks. If set to Enabled, the tasks will queued. The default is Enabled. + // tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED. // // If you use the same agent to run multiple tasks you can enable the tasks - // to run in series. For more information see task-queue. + // to run in series. For more information see queue-task-execution. TaskQueueing TaskQueueing `type:"string" enum:"true"` // The user ID (UID) of the file's owner. @@ -508,6 +508,10 @@ type TaskExecutionResultDetail struct { // The status of the PREPARING phase. PrepareStatus PhaseStatus `type:"string" enum:"true"` + // The total time in milliseconds that AWS DataSync took to transfer the file + // from the source to the destination location. + TotalDuration *int64 `type:"long"` + // The total time in milliseconds that AWS DataSync spent in the TRANSFERRING // phase. 
TransferDuration *int64 `type:"long"` @@ -548,3 +552,34 @@ type TaskListEntry struct { func (s TaskListEntry) String() string { return awsutil.Prettify(s) } + +// Specifies the schedule you want your task to use for repeated executions. +// For more information, see Schedule Expressions for Rules (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html). +type TaskSchedule struct { + _ struct{} `type:"structure"` + + // A cron expression that specifies when AWS DataSync initiates a scheduled + // transfer from a source to a destination location. + // + // ScheduleExpression is a required field + ScheduleExpression *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TaskSchedule) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TaskSchedule) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TaskSchedule"} + + if s.ScheduleExpression == nil { + invalidParams.Add(aws.NewErrParamRequired("ScheduleExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} diff --git a/service/dlm/api_op_CreateLifecyclePolicy.go b/service/dlm/api_op_CreateLifecyclePolicy.go index e7d790e6a2b..211624b7a99 100644 --- a/service/dlm/api_op_CreateLifecyclePolicy.go +++ b/service/dlm/api_op_CreateLifecyclePolicy.go @@ -27,8 +27,6 @@ type CreateLifecyclePolicyInput struct { // The configuration details of the lifecycle policy. // - // Target tags cannot be re-used across lifecycle policies. - // // PolicyDetails is a required field PolicyDetails *PolicyDetails `type:"structure" required:"true"` @@ -36,6 +34,9 @@ type CreateLifecyclePolicyInput struct { // // State is a required field State SettablePolicyStateValues `type:"string" required:"true" enum:"true"` + + // The tags to apply to the lifecycle policy during creation. + Tags map[string]string `min:"1" type:"map"` } // String returns the string representation @@ -61,6 +62,9 @@ func (s *CreateLifecyclePolicyInput) Validate() error { if len(s.State) == 0 { invalidParams.Add(aws.NewErrParamRequired("State")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } if s.PolicyDetails != nil { if err := s.PolicyDetails.Validate(); err != nil { invalidParams.AddNested("PolicyDetails", err.(aws.ErrInvalidParams)) @@ -101,6 +105,18 @@ func (s CreateLifecyclePolicyInput) MarshalFields(e protocol.FieldEncoder) error metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } diff --git a/service/dlm/api_op_ListTagsForResource.go b/service/dlm/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..566884c8167 --- /dev/null +++ b/service/dlm/api_op_ListTagsForResource.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dlm + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. 
+ // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Information about the tags. + Tags map[string]string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// Amazon Data Lifecycle Manager. +// +// Lists the tags for the specified resource. +// +// // Example sending a request using ListTagsForResourceRequest. +// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. 
+func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. +func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dlm/api_op_TagResource.go b/service/dlm/api_op_TagResource.go new file mode 100644 index 00000000000..34fe74fe20f --- /dev/null +++ b/service/dlm/api_op_TagResource.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dlm + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // One or more tags. + // + // Tags is a required field + Tags map[string]string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// Amazon Data Lifecycle Manager. +// +// Adds the specified tags to the specified resource. +// +// // Example sending a request using TagResourceRequest. +// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. +func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. +func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dlm/api_op_UntagResource.go b/service/dlm/api_op_UntagResource.go new file mode 100644 index 00000000000..5f22fd33941 --- /dev/null +++ b/service/dlm/api_op_UntagResource.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dlm + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tag keys. + // + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "resourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "tagKeys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// Amazon Data Lifecycle Manager. +// +// Removes the specified tags from the specified resource. +// +// // Example sending a request using UntagResourceRequest. +// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/dlm-2018-01-12/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. 
+func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/dlm/api_op_UpdateLifecyclePolicy.go b/service/dlm/api_op_UpdateLifecyclePolicy.go index d9a05556ad0..298a35ac756 100644 --- a/service/dlm/api_op_UpdateLifecyclePolicy.go +++ b/service/dlm/api_op_UpdateLifecyclePolicy.go @@ -20,9 +20,8 @@ type UpdateLifecyclePolicyInput struct { // specified by the lifecycle policy. ExecutionRoleArn *string `type:"string"` - // The configuration of the lifecycle policy. - // - // Target tags cannot be re-used across policies. + // The configuration of the lifecycle policy. You cannot update the policy type + // or the resource type. PolicyDetails *PolicyDetails `type:"structure"` // The identifier of the lifecycle policy. diff --git a/service/dlm/api_types.go b/service/dlm/api_types.go index 6defb10aaaf..7995e017033 100644 --- a/service/dlm/api_types.go +++ b/service/dlm/api_types.go @@ -89,6 +89,73 @@ func (s CreateRule) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specifies when to enable fast snapshot restore. +type FastRestoreRule struct { + _ struct{} `type:"structure"` + + // The Availability Zones in which to enable fast snapshot restore. + // + // AvailabilityZones is a required field + AvailabilityZones []string `min:"1" type:"list" required:"true"` + + // The number of snapshots to be enabled with fast snapshot restore. + // + // Count is a required field + Count *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s FastRestoreRule) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FastRestoreRule) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "FastRestoreRule"} + + if s.AvailabilityZones == nil { + invalidParams.Add(aws.NewErrParamRequired("AvailabilityZones")) + } + if s.AvailabilityZones != nil && len(s.AvailabilityZones) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AvailabilityZones", 1)) + } + + if s.Count == nil { + invalidParams.Add(aws.NewErrParamRequired("Count")) + } + if s.Count != nil && *s.Count < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Count", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s FastRestoreRule) MarshalFields(e protocol.FieldEncoder) error { + if s.AvailabilityZones != nil { + v := s.AvailabilityZones + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "AvailabilityZones", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Count != nil { + v := *s.Count + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Count", protocol.Int64Value(v), metadata) + } + return nil +} + // Detailed information about a lifecycle policy. type LifecyclePolicy struct { _ struct{} `type:"structure"` @@ -106,6 +173,9 @@ type LifecyclePolicy struct { // specified by the lifecycle policy. ExecutionRoleArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the policy. + PolicyArn *string `type:"string"` + // The configuration of the lifecycle policy PolicyDetails *PolicyDetails `type:"structure"` @@ -114,6 +184,12 @@ type LifecyclePolicy struct { // The activation state of the lifecycle policy. 
State GettablePolicyStateValues `type:"string" enum:"true"` + + // The description of the status. + StatusMessage *string `type:"string"` + + // The tags. + Tags map[string]string `min:"1" type:"map"` } // String returns the string representation @@ -149,6 +225,12 @@ func (s LifecyclePolicy) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "ExecutionRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.PolicyDetails != nil { v := s.PolicyDetails @@ -167,6 +249,24 @@ func (s LifecyclePolicy) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) } + if s.StatusMessage != nil { + v := *s.StatusMessage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "StatusMessage", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -182,6 +282,9 @@ type LifecyclePolicySummary struct { // The activation state of the lifecycle policy. State GettablePolicyStateValues `type:"string" enum:"true"` + + // The tags. + Tags map[string]string `min:"1" type:"map"` } // String returns the string representation @@ -209,6 +312,18 @@ func (s LifecyclePolicySummary) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "State", protocol.QuotedValue{ValueMarshaler: v}, metadata) } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } return nil } @@ -405,6 +520,9 @@ type Schedule struct { // The create rule. CreateRule *CreateRule `type:"structure"` + // Enable fast snapshot restore. + FastRestoreRule *FastRestoreRule `type:"structure"` + // The name of the schedule. 
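The FastRestoreRule shape added above plugs into the existing Schedule type through the FastRestoreRule field introduced in this change. The following is a small sketch of constructing such a schedule and running the generated client-side validation; the zone names and count are placeholders, and it assumes a schedule carrying only these fields passes Validate.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dlm"
)

func main() {
	// Schedule that also enables fast snapshot restore in two Availability Zones
	// for the two most recent snapshots. Zone names and count are placeholders.
	sched := dlm.Schedule{
		Name: aws.String("daily-with-fsr"),
		FastRestoreRule: &dlm.FastRestoreRule{
			AvailabilityZones: []string{"us-east-2a", "us-east-2b"},
			Count:             aws.Int64(2),
		},
	}

	// Client-side checks mirror the generated Validate methods:
	// AvailabilityZones and Count are required, and Count must be at least 1.
	if err := sched.Validate(); err != nil {
		fmt.Println("invalid schedule:", err)
		return
	}
	fmt.Println("schedule ok")
}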
Name *string `type:"string"` @@ -435,6 +553,11 @@ func (s *Schedule) Validate() error { invalidParams.AddNested("CreateRule", err.(aws.ErrInvalidParams)) } } + if s.FastRestoreRule != nil { + if err := s.FastRestoreRule.Validate(); err != nil { + invalidParams.AddNested("FastRestoreRule", err.(aws.ErrInvalidParams)) + } + } if s.RetainRule != nil { if err := s.RetainRule.Validate(); err != nil { invalidParams.AddNested("RetainRule", err.(aws.ErrInvalidParams)) @@ -475,6 +598,12 @@ func (s Schedule) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "CreateRule", v, metadata) } + if s.FastRestoreRule != nil { + v := s.FastRestoreRule + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "FastRestoreRule", v, metadata) + } if s.Name != nil { v := *s.Name diff --git a/service/dlm/dlmiface/interface.go b/service/dlm/dlmiface/interface.go index dec4c12b422..c9d03d74245 100644 --- a/service/dlm/dlmiface/interface.go +++ b/service/dlm/dlmiface/interface.go @@ -69,6 +69,12 @@ type ClientAPI interface { GetLifecyclePolicyRequest(*dlm.GetLifecyclePolicyInput) dlm.GetLifecyclePolicyRequest + ListTagsForResourceRequest(*dlm.ListTagsForResourceInput) dlm.ListTagsForResourceRequest + + TagResourceRequest(*dlm.TagResourceInput) dlm.TagResourceRequest + + UntagResourceRequest(*dlm.UntagResourceInput) dlm.UntagResourceRequest + UpdateLifecyclePolicyRequest(*dlm.UpdateLifecyclePolicyInput) dlm.UpdateLifecyclePolicyRequest } diff --git a/service/ec2/api_enums.go b/service/ec2/api_enums.go index c466b5d7599..d1c4f5e7e58 100644 --- a/service/ec2/api_enums.go +++ b/service/ec2/api_enums.go @@ -839,6 +839,26 @@ func (enum ExportTaskState) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type FastSnapshotRestoreStateCode string + +// Enum values for FastSnapshotRestoreStateCode +const ( + FastSnapshotRestoreStateCodeEnabling FastSnapshotRestoreStateCode = "enabling" + FastSnapshotRestoreStateCodeOptimizing FastSnapshotRestoreStateCode = "optimizing" + FastSnapshotRestoreStateCodeEnabled FastSnapshotRestoreStateCode = "enabled" + FastSnapshotRestoreStateCodeDisabling FastSnapshotRestoreStateCode = "disabling" + FastSnapshotRestoreStateCodeDisabled FastSnapshotRestoreStateCode = "disabled" +) + +func (enum FastSnapshotRestoreStateCode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum FastSnapshotRestoreStateCode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type FleetActivityStatus string // Enum values for FleetActivityStatus @@ -1056,6 +1076,23 @@ func (enum HostTenancy) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type HttpTokensState string + +// Enum values for HttpTokensState +const ( + HttpTokensStateOptional HttpTokensState = "optional" + HttpTokensStateRequired HttpTokensState = "required" +) + +func (enum HttpTokensState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum HttpTokensState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type HypervisorType string // Enum values for HypervisorType @@ -1269,6 +1306,40 @@ func (enum InstanceMatchCriteria) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type InstanceMetadataEndpointState string + +// Enum values for InstanceMetadataEndpointState +const ( + InstanceMetadataEndpointStateDisabled InstanceMetadataEndpointState = "disabled" + 
InstanceMetadataEndpointStateEnabled InstanceMetadataEndpointState = "enabled" +) + +func (enum InstanceMetadataEndpointState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum InstanceMetadataEndpointState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type InstanceMetadataOptionsState string + +// Enum values for InstanceMetadataOptionsState +const ( + InstanceMetadataOptionsStatePending InstanceMetadataOptionsState = "pending" + InstanceMetadataOptionsStateApplied InstanceMetadataOptionsState = "applied" +) + +func (enum InstanceMetadataOptionsState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum InstanceMetadataOptionsState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type InstanceStateName string // Enum values for InstanceStateName diff --git a/service/ec2/api_op_AttachVolume.go b/service/ec2/api_op_AttachVolume.go index da4b76cc070..d8f36b5415f 100644 --- a/service/ec2/api_op_AttachVolume.go +++ b/service/ec2/api_op_AttachVolume.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for AttachVolume. type AttachVolumeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_CopySnapshot.go b/service/ec2/api_op_CopySnapshot.go index e649d0dfcb1..72fc8499b50 100644 --- a/service/ec2/api_op_CopySnapshot.go +++ b/service/ec2/api_op_CopySnapshot.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for CopySnapshot. type CopySnapshotInput struct { _ struct{} `type:"structure"` @@ -84,6 +83,9 @@ type CopySnapshotInput struct { // // SourceSnapshotId is a required field SourceSnapshotId *string `type:"string" required:"true"` + + // The tags to apply to the new snapshot. + TagSpecifications []TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -109,12 +111,14 @@ func (s *CopySnapshotInput) Validate() error { return nil } -// Contains the output of CopySnapshot. type CopySnapshotOutput struct { _ struct{} `type:"structure"` // The ID of the new snapshot. SnapshotId *string `locationName:"snapshotId" type:"string"` + + // Any tags applied to the new snapshot. + Tags []Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation diff --git a/service/ec2/api_op_CreateCustomerGateway.go b/service/ec2/api_op_CreateCustomerGateway.go index f0db058a569..61bc965810a 100644 --- a/service/ec2/api_op_CreateCustomerGateway.go +++ b/service/ec2/api_op_CreateCustomerGateway.go @@ -23,6 +23,11 @@ type CreateCustomerGatewayInput struct { // The Amazon Resource Name (ARN) for the customer gateway certificate. CertificateArn *string `type:"string"` + // A name for the customer gateway device. + // + // Length Constraints: Up to 255 characters. + DeviceName *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -98,11 +103,10 @@ const opCreateCustomerGateway = "CreateCustomerGateway" // For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) // in the AWS Site-to-Site VPN User Guide. 
// -// You cannot create more than one customer gateway with the same VPN type, -// IP address, and BGP ASN parameter values. If you run an identical request -// more than one time, the first request creates the customer gateway, and subsequent -// requests return information about the existing customer gateway. The subsequent -// requests do not create new customer gateway resources. +// To create more than one customer gateway with the same VPN type, IP address, +// and BGP ASN, specify a unique device name for each customer gateway. Identical +// requests return information about the existing customer gateway and do not +// create new customer gateways. // // // Example sending a request using CreateCustomerGatewayRequest. // req := client.CreateCustomerGatewayRequest(params) diff --git a/service/ec2/api_op_CreateSnapshot.go b/service/ec2/api_op_CreateSnapshot.go index 16ce5fd8a00..6bd22237e32 100644 --- a/service/ec2/api_op_CreateSnapshot.go +++ b/service/ec2/api_op_CreateSnapshot.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for CreateSnapshot. type CreateSnapshotInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_CreateSnapshots.go b/service/ec2/api_op_CreateSnapshots.go index 50a31e39ed3..647ec22053c 100644 --- a/service/ec2/api_op_CreateSnapshots.go +++ b/service/ec2/api_op_CreateSnapshots.go @@ -18,9 +18,10 @@ type CreateSnapshotsInput struct { // A description propagated to every snapshot specified by the instance. Description *string `type:"string"` - // Checks whether you have the required permissions for the action without actually - // making the request. Provides an error response. If you have the required - // permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` // The instance to specify which volumes should be included in the snapshots. diff --git a/service/ec2/api_op_CreateVolume.go b/service/ec2/api_op_CreateVolume.go index 111ad455a73..770ae4e1ae6 100644 --- a/service/ec2/api_op_CreateVolume.go +++ b/service/ec2/api_op_CreateVolume.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for CreateVolume. type CreateVolumeInput struct { _ struct{} `type:"structure"` @@ -129,6 +128,9 @@ type CreateVolumeOutput struct { // Indicates whether the volume is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` + // Indicates whether the volume was created using fast snapshot restore. + FastRestored *bool `locationName:"fastRestored" type:"boolean"` + // The number of I/O operations per second (IOPS) that the volume supports. // For Provisioned IOPS SSD volumes, this represents the number of IOPS that // are provisioned for the volume. For General Purpose SSD volumes, this represents diff --git a/service/ec2/api_op_DeleteSnapshot.go b/service/ec2/api_op_DeleteSnapshot.go index 9882e4566b6..b7b571dbe49 100644 --- a/service/ec2/api_op_DeleteSnapshot.go +++ b/service/ec2/api_op_DeleteSnapshot.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for DeleteSnapshot. 
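CopySnapshot, shown earlier in this change, can now tag the copy in the same call via TagSpecifications. A rough sketch follows; the snapshot ID, regions, and tag values are placeholders, and ResourceTypeSnapshot is assumed to be the EC2 enum value used when tagging snapshots.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.New(cfg)

	// Snapshot ID, source region, and tag values are placeholders.
	req := client.CopySnapshotRequest(&ec2.CopySnapshotInput{
		SourceRegion:     aws.String("us-west-2"),
		SourceSnapshotId: aws.String("snap-1234567890abcdef0"),
		Description:      aws.String("copy with tags"),
		TagSpecifications: []ec2.TagSpecification{{
			ResourceType: ec2.ResourceTypeSnapshot, // assumed enum value for snapshots
			Tags: []ec2.Tag{{
				Key:   aws.String("Name"),
				Value: aws.String("copied-snapshot"),
			}},
		}},
	})

	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// The output now carries the applied tags alongside the new snapshot ID.
	fmt.Println(resp)
}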
type DeleteSnapshotInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_DeleteVolume.go b/service/ec2/api_op_DeleteVolume.go index 234ee39f2e5..9a48c053490 100644 --- a/service/ec2/api_op_DeleteVolume.go +++ b/service/ec2/api_op_DeleteVolume.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for DeleteVolume. type DeleteVolumeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_DescribeExportImageTasks.go b/service/ec2/api_op_DescribeExportImageTasks.go index 183d9385bef..47b721aab58 100644 --- a/service/ec2/api_op_DescribeExportImageTasks.go +++ b/service/ec2/api_op_DescribeExportImageTasks.go @@ -86,6 +86,12 @@ func (c *Client) DescribeExportImageTasksRequest(input *DescribeExportImageTasks Name: opDescribeExportImageTasks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -120,6 +126,53 @@ func (r DescribeExportImageTasksRequest) Send(ctx context.Context) (*DescribeExp return resp, nil } +// NewDescribeExportImageTasksRequestPaginator returns a paginator for DescribeExportImageTasks. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.DescribeExportImageTasksRequest(input) +// p := ec2.NewDescribeExportImageTasksRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewDescribeExportImageTasksPaginator(req DescribeExportImageTasksRequest) DescribeExportImageTasksPaginator { + return DescribeExportImageTasksPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *DescribeExportImageTasksInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// DescribeExportImageTasksPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type DescribeExportImageTasksPaginator struct { + aws.Pager +} + +func (p *DescribeExportImageTasksPaginator) CurrentPage() *DescribeExportImageTasksOutput { + return p.Pager.CurrentPage().(*DescribeExportImageTasksOutput) +} + // DescribeExportImageTasksResponse is the response type for the // DescribeExportImageTasks API operation. type DescribeExportImageTasksResponse struct { diff --git a/service/ec2/api_op_DescribeFastSnapshotRestores.go b/service/ec2/api_op_DescribeFastSnapshotRestores.go new file mode 100644 index 00000000000..4c1ec3f5a5b --- /dev/null +++ b/service/ec2/api_op_DescribeFastSnapshotRestores.go @@ -0,0 +1,181 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The filters. The possible values are: + // + // * availability-zone: The Availability Zone of the snapshot. + // + // * owner-id: The ID of the AWS account that owns the snapshot. + // + // * snapshot-id: The ID of the snapshot. + // + // * state: The state of fast snapshot restores for the snapshot (enabling + // | optimizing | enabled | disabling | disabled). + Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +type DescribeFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the state of fast snapshot restores. + FastSnapshotRestores []DescribeFastSnapshotRestoreSuccessItem `locationName:"fastSnapshotRestoreSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeFastSnapshotRestores = "DescribeFastSnapshotRestores" + +// DescribeFastSnapshotRestoresRequest returns a request value for making API operation for +// Amazon Elastic Compute Cloud. +// +// Describes the state of fast snapshot restores for your snapshots. +// +// // Example sending a request using DescribeFastSnapshotRestoresRequest. +// req := client.DescribeFastSnapshotRestoresRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFastSnapshotRestores +func (c *Client) DescribeFastSnapshotRestoresRequest(input *DescribeFastSnapshotRestoresInput) DescribeFastSnapshotRestoresRequest { + op := &aws.Operation{ + Name: opDescribeFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeFastSnapshotRestoresInput{} + } + + req := c.newRequest(op, input, &DescribeFastSnapshotRestoresOutput{}) + return DescribeFastSnapshotRestoresRequest{Request: req, Input: input, Copy: c.DescribeFastSnapshotRestoresRequest} +} + +// DescribeFastSnapshotRestoresRequest is the request type for the +// DescribeFastSnapshotRestores API operation. +type DescribeFastSnapshotRestoresRequest struct { + *aws.Request + Input *DescribeFastSnapshotRestoresInput + Copy func(*DescribeFastSnapshotRestoresInput) DescribeFastSnapshotRestoresRequest +} + +// Send marshals and sends the DescribeFastSnapshotRestores API request. 
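DescribeFastSnapshotRestores ships with a generated paginator, so listing every fast-snapshot-restore record in the account and region becomes a short loop over pages. A sketch using the paginator constructor defined below; it assumes credentials and a default region are available to the external config loader.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.New(cfg)

	// Build the request once, then let the paginator clone it per page.
	req := client.DescribeFastSnapshotRestoresRequest(&ec2.DescribeFastSnapshotRestoresInput{})
	p := ec2.NewDescribeFastSnapshotRestoresPaginator(req)

	for p.Next(context.TODO()) {
		for _, fsr := range p.CurrentPage().FastSnapshotRestores {
			// Each item reports the snapshot, zone, and current restore state.
			fmt.Println(fsr)
		}
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}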
+func (r DescribeFastSnapshotRestoresRequest) Send(ctx context.Context) (*DescribeFastSnapshotRestoresResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeFastSnapshotRestoresResponse{ + DescribeFastSnapshotRestoresOutput: r.Request.Data.(*DescribeFastSnapshotRestoresOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewDescribeFastSnapshotRestoresRequestPaginator returns a paginator for DescribeFastSnapshotRestores. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.DescribeFastSnapshotRestoresRequest(input) +// p := ec2.NewDescribeFastSnapshotRestoresRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewDescribeFastSnapshotRestoresPaginator(req DescribeFastSnapshotRestoresRequest) DescribeFastSnapshotRestoresPaginator { + return DescribeFastSnapshotRestoresPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *DescribeFastSnapshotRestoresInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// DescribeFastSnapshotRestoresPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type DescribeFastSnapshotRestoresPaginator struct { + aws.Pager +} + +func (p *DescribeFastSnapshotRestoresPaginator) CurrentPage() *DescribeFastSnapshotRestoresOutput { + return p.Pager.CurrentPage().(*DescribeFastSnapshotRestoresOutput) +} + +// DescribeFastSnapshotRestoresResponse is the response type for the +// DescribeFastSnapshotRestores API operation. +type DescribeFastSnapshotRestoresResponse struct { + *DescribeFastSnapshotRestoresOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeFastSnapshotRestores request. +func (r *DescribeFastSnapshotRestoresResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/ec2/api_op_DescribeInstances.go b/service/ec2/api_op_DescribeInstances.go index b9f527caee1..29fb1bc6cae 100644 --- a/service/ec2/api_op_DescribeInstances.go +++ b/service/ec2/api_op_DescribeInstances.go @@ -62,9 +62,8 @@ type DescribeInstancesInput struct { // * hypervisor - The hypervisor type of the instance (ovm | xen). // // * iam-instance-profile.arn - The instance profile associated with the - // instance. Specified as an ARN. - // - // * image-id - The ID of the image used to launch the instance. + // instance. Specified as an ARN. image-id - The ID of the image used to + // launch the instance. // // * instance-id - The ID of the instance. // @@ -97,6 +96,15 @@ type DescribeInstancesInput struct { // // * launch-time - The time when the instance was launched. 
// + // * metadata-http-tokens - The metadata request authorization state (optional + // | required) + // + // * metadata-http-put-response-hop-limit - The http metadata request put + // response hop limit (integer, possible values 1 to 64) + // + // * metadata-http-endpoint - Enable or disable metadata access on http endpoint + // (enabled | disabled) + // // * monitoring-state - Indicates whether detailed monitoring is enabled // (disabled | enabled). // diff --git a/service/ec2/api_op_DescribeSnapshotAttribute.go b/service/ec2/api_op_DescribeSnapshotAttribute.go index 71b68ae27d6..8054b7fa367 100644 --- a/service/ec2/api_op_DescribeSnapshotAttribute.go +++ b/service/ec2/api_op_DescribeSnapshotAttribute.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for DescribeSnapshotAttribute. type DescribeSnapshotAttributeInput struct { _ struct{} `type:"structure"` @@ -52,7 +51,6 @@ func (s *DescribeSnapshotAttributeInput) Validate() error { return nil } -// Contains the output of DescribeSnapshotAttribute. type DescribeSnapshotAttributeOutput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_DescribeVolumeAttribute.go b/service/ec2/api_op_DescribeVolumeAttribute.go index 8e0569fadfd..b19a75460de 100644 --- a/service/ec2/api_op_DescribeVolumeAttribute.go +++ b/service/ec2/api_op_DescribeVolumeAttribute.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for DescribeVolumeAttribute. type DescribeVolumeAttributeInput struct { _ struct{} `type:"structure"` @@ -52,7 +51,6 @@ func (s *DescribeVolumeAttributeInput) Validate() error { return nil } -// Contains the output of DescribeVolumeAttribute. type DescribeVolumeAttributeOutput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_DescribeVpnConnections.go b/service/ec2/api_op_DescribeVpnConnections.go index 4444dc5d734..33e66c1a183 100644 --- a/service/ec2/api_op_DescribeVpnConnections.go +++ b/service/ec2/api_op_DescribeVpnConnections.go @@ -57,6 +57,9 @@ type DescribeVpnConnectionsInput struct { // // * vpn-gateway-id - The ID of a virtual private gateway associated with // the VPN connection. + // + // * transit-gateway-id - The ID of a transit gateway associated with the + // VPN connection. Filters []Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPN connection IDs. diff --git a/service/ec2/api_op_DetachVolume.go b/service/ec2/api_op_DetachVolume.go index 76c3f5ecf5e..ecbfa5af75c 100644 --- a/service/ec2/api_op_DetachVolume.go +++ b/service/ec2/api_op_DetachVolume.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/internal/awsutil" ) -// Contains the parameters for DetachVolume. type DetachVolumeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_DisableFastSnapshotRestores.go b/service/ec2/api_op_DisableFastSnapshotRestores.go new file mode 100644 index 00000000000..3f2ec089a65 --- /dev/null +++ b/service/ec2/api_op_DisableFastSnapshotRestores.go @@ -0,0 +1,139 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DisableFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones. For example, us-east-2a. 
+ // + // AvailabilityZones is a required field + AvailabilityZones []string `locationName:"AvailabilityZone" locationNameList:"AvailabilityZone" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more snapshots. For example, snap-1234567890abcdef0. + // + // SourceSnapshotIds is a required field + SourceSnapshotIds []string `locationName:"SourceSnapshotId" locationNameList:"SnapshotId" type:"list" required:"true"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableFastSnapshotRestoresInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DisableFastSnapshotRestoresInput"} + + if s.AvailabilityZones == nil { + invalidParams.Add(aws.NewErrParamRequired("AvailabilityZones")) + } + + if s.SourceSnapshotIds == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceSnapshotIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DisableFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the snapshots for which fast snapshot restores were successfully + // disabled. + Successful []DisableFastSnapshotRestoreSuccessItem `locationName:"successful" locationNameList:"item" type:"list"` + + // Information about the snapshots for which fast snapshot restores could not + // be disabled. + Unsuccessful []DisableFastSnapshotRestoreErrorItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +const opDisableFastSnapshotRestores = "DisableFastSnapshotRestores" + +// DisableFastSnapshotRestoresRequest returns a request value for making API operation for +// Amazon Elastic Compute Cloud. +// +// Disables fast snapshot restores for the specified snapshots in the specified +// Availability Zones. +// +// // Example sending a request using DisableFastSnapshotRestoresRequest. +// req := client.DisableFastSnapshotRestoresRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableFastSnapshotRestores +func (c *Client) DisableFastSnapshotRestoresRequest(input *DisableFastSnapshotRestoresInput) DisableFastSnapshotRestoresRequest { + op := &aws.Operation{ + Name: opDisableFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableFastSnapshotRestoresInput{} + } + + req := c.newRequest(op, input, &DisableFastSnapshotRestoresOutput{}) + return DisableFastSnapshotRestoresRequest{Request: req, Input: input, Copy: c.DisableFastSnapshotRestoresRequest} +} + +// DisableFastSnapshotRestoresRequest is the request type for the +// DisableFastSnapshotRestores API operation. 
+type DisableFastSnapshotRestoresRequest struct { + *aws.Request + Input *DisableFastSnapshotRestoresInput + Copy func(*DisableFastSnapshotRestoresInput) DisableFastSnapshotRestoresRequest +} + +// Send marshals and sends the DisableFastSnapshotRestores API request. +func (r DisableFastSnapshotRestoresRequest) Send(ctx context.Context) (*DisableFastSnapshotRestoresResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DisableFastSnapshotRestoresResponse{ + DisableFastSnapshotRestoresOutput: r.Request.Data.(*DisableFastSnapshotRestoresOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DisableFastSnapshotRestoresResponse is the response type for the +// DisableFastSnapshotRestores API operation. +type DisableFastSnapshotRestoresResponse struct { + *DisableFastSnapshotRestoresOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DisableFastSnapshotRestores request. +func (r *DisableFastSnapshotRestoresResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/ec2/api_op_EnableFastSnapshotRestores.go b/service/ec2/api_op_EnableFastSnapshotRestores.go new file mode 100644 index 00000000000..5b3c2173408 --- /dev/null +++ b/service/ec2/api_op_EnableFastSnapshotRestores.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type EnableFastSnapshotRestoresInput struct { + _ struct{} `type:"structure"` + + // One or more Availability Zones. For example, us-east-2a. + // + // AvailabilityZones is a required field + AvailabilityZones []string `locationName:"AvailabilityZone" locationNameList:"AvailabilityZone" type:"list" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of one or more snapshots. For example, snap-1234567890abcdef0. You + // can specify a snapshot that was shared with you from another AWS account. + // + // SourceSnapshotIds is a required field + SourceSnapshotIds []string `locationName:"SourceSnapshotId" locationNameList:"SnapshotId" type:"list" required:"true"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoresInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableFastSnapshotRestoresInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EnableFastSnapshotRestoresInput"} + + if s.AvailabilityZones == nil { + invalidParams.Add(aws.NewErrParamRequired("AvailabilityZones")) + } + + if s.SourceSnapshotIds == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceSnapshotIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type EnableFastSnapshotRestoresOutput struct { + _ struct{} `type:"structure"` + + // Information about the snapshots for which fast snapshot restores were successfully + // enabled. 
+ Successful []EnableFastSnapshotRestoreSuccessItem `locationName:"successful" locationNameList:"item" type:"list"` + + // Information about the snapshots for which fast snapshot restores could not + // be enabled. + Unsuccessful []EnableFastSnapshotRestoreErrorItem `locationName:"unsuccessful" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoresOutput) String() string { + return awsutil.Prettify(s) +} + +const opEnableFastSnapshotRestores = "EnableFastSnapshotRestores" + +// EnableFastSnapshotRestoresRequest returns a request value for making API operation for +// Amazon Elastic Compute Cloud. +// +// Enables fast snapshot restores for the specified snapshots in the specified +// Availability Zones. +// +// You get the full benefit of fast snapshot restores after they enter the enabled +// state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. +// To disable fast snapshot restores, use DisableFastSnapshotRestores. +// +// // Example sending a request using EnableFastSnapshotRestoresRequest. +// req := client.EnableFastSnapshotRestoresRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableFastSnapshotRestores +func (c *Client) EnableFastSnapshotRestoresRequest(input *EnableFastSnapshotRestoresInput) EnableFastSnapshotRestoresRequest { + op := &aws.Operation{ + Name: opEnableFastSnapshotRestores, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableFastSnapshotRestoresInput{} + } + + req := c.newRequest(op, input, &EnableFastSnapshotRestoresOutput{}) + return EnableFastSnapshotRestoresRequest{Request: req, Input: input, Copy: c.EnableFastSnapshotRestoresRequest} +} + +// EnableFastSnapshotRestoresRequest is the request type for the +// EnableFastSnapshotRestores API operation. +type EnableFastSnapshotRestoresRequest struct { + *aws.Request + Input *EnableFastSnapshotRestoresInput + Copy func(*EnableFastSnapshotRestoresInput) EnableFastSnapshotRestoresRequest +} + +// Send marshals and sends the EnableFastSnapshotRestores API request. +func (r EnableFastSnapshotRestoresRequest) Send(ctx context.Context) (*EnableFastSnapshotRestoresResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &EnableFastSnapshotRestoresResponse{ + EnableFastSnapshotRestoresOutput: r.Request.Data.(*EnableFastSnapshotRestoresOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// EnableFastSnapshotRestoresResponse is the response type for the +// EnableFastSnapshotRestores API operation. +type EnableFastSnapshotRestoresResponse struct { + *EnableFastSnapshotRestoresOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// EnableFastSnapshotRestores request. +func (r *EnableFastSnapshotRestoresResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/ec2/api_op_EnableVolumeIO.go b/service/ec2/api_op_EnableVolumeIO.go index e0fb8558b81..7a8214836bc 100644 --- a/service/ec2/api_op_EnableVolumeIO.go +++ b/service/ec2/api_op_EnableVolumeIO.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for EnableVolumeIO. 
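Enabling and disabling fast snapshot restores are symmetric calls that take the same Availability Zone and snapshot lists. A minimal sketch with a placeholder zone and snapshot ID:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.New(cfg)

	// Placeholder Availability Zone and snapshot ID.
	zones := []string{"us-east-2a"}
	snaps := []string{"snap-1234567890abcdef0"}

	enable := client.EnableFastSnapshotRestoresRequest(&ec2.EnableFastSnapshotRestoresInput{
		AvailabilityZones: zones,
		SourceSnapshotIds: snaps,
	})
	enableResp, err := enable.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("successful:", len(enableResp.Successful), "unsuccessful:", len(enableResp.Unsuccessful))

	// Later, turn fast snapshot restore back off for the same snapshots and zones.
	disable := client.DisableFastSnapshotRestoresRequest(&ec2.DisableFastSnapshotRestoresInput{
		AvailabilityZones: zones,
		SourceSnapshotIds: snaps,
	})
	if _, err := disable.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}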
type EnableVolumeIOInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_ModifyInstanceMetadataOptions.go b/service/ec2/api_op_ModifyInstanceMetadataOptions.go new file mode 100644 index 00000000000..616ac2781ba --- /dev/null +++ b/service/ec2/api_op_ModifyInstanceMetadataOptions.go @@ -0,0 +1,162 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ec2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ModifyInstanceMetadataOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the existing state is maintained. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint InstanceMetadataEndpointState `type:"string" enum:"true"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. If + // no parameter is specified, the existing state is maintained. + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credential + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens HttpTokensState `type:"string" enum:"true"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyInstanceMetadataOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceMetadataOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ModifyInstanceMetadataOptionsInput"} + + if s.InstanceId == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyInstanceMetadataOptionsOutput struct { + _ struct{} `type:"structure"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The metadata options for the instance. 
+ InstanceMetadataOptions *InstanceMetadataOptionsResponse `locationName:"instanceMetadataOptions" type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceMetadataOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opModifyInstanceMetadataOptions = "ModifyInstanceMetadataOptions" + +// ModifyInstanceMetadataOptionsRequest returns a request value for making API operation for +// Amazon Elastic Compute Cloud. +// +// Modify the instance metadata parameters on a running or stopped instance. +// When you modify the parameters on a stopped instance, they are applied when +// the instance is started. When you modify the parameters on a running instance, +// the API responds with a state of “pending”. After the parameter modifications +// are successfully applied to the instance, the state of the modifications +// changes from “pending” to “applied” in subsequent describe-instances +// API calls. For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +// +// // Example sending a request using ModifyInstanceMetadataOptionsRequest. +// req := client.ModifyInstanceMetadataOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyInstanceMetadataOptions +func (c *Client) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadataOptionsInput) ModifyInstanceMetadataOptionsRequest { + op := &aws.Operation{ + Name: opModifyInstanceMetadataOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyInstanceMetadataOptionsInput{} + } + + req := c.newRequest(op, input, &ModifyInstanceMetadataOptionsOutput{}) + return ModifyInstanceMetadataOptionsRequest{Request: req, Input: input, Copy: c.ModifyInstanceMetadataOptionsRequest} +} + +// ModifyInstanceMetadataOptionsRequest is the request type for the +// ModifyInstanceMetadataOptions API operation. +type ModifyInstanceMetadataOptionsRequest struct { + *aws.Request + Input *ModifyInstanceMetadataOptionsInput + Copy func(*ModifyInstanceMetadataOptionsInput) ModifyInstanceMetadataOptionsRequest +} + +// Send marshals and sends the ModifyInstanceMetadataOptions API request. +func (r ModifyInstanceMetadataOptionsRequest) Send(ctx context.Context) (*ModifyInstanceMetadataOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ModifyInstanceMetadataOptionsResponse{ + ModifyInstanceMetadataOptionsOutput: r.Request.Data.(*ModifyInstanceMetadataOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ModifyInstanceMetadataOptionsResponse is the response type for the +// ModifyInstanceMetadataOptions API operation. +type ModifyInstanceMetadataOptionsResponse struct { + *ModifyInstanceMetadataOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ModifyInstanceMetadataOptions request. 
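ModifyInstanceMetadataOptions is the call that moves an existing instance to IMDSv2-only metadata access. A sketch with a placeholder instance ID; as described above, the returned state starts as pending and becomes applied once the change takes effect.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := ec2.New(cfg)

	// Instance ID is a placeholder. Requiring tokens makes IMDSv1 requests fail.
	req := client.ModifyInstanceMetadataOptionsRequest(&ec2.ModifyInstanceMetadataOptionsInput{
		InstanceId:              aws.String("i-1234567890abcdef0"),
		HttpTokens:              ec2.HttpTokensStateRequired,
		HttpEndpoint:            ec2.InstanceMetadataEndpointStateEnabled,
		HttpPutResponseHopLimit: aws.Int64(1),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes the instance ID and the (initially pending) metadata options.
	fmt.Println(resp)
}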
+func (r *ModifyInstanceMetadataOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/ec2/api_op_ModifySnapshotAttribute.go b/service/ec2/api_op_ModifySnapshotAttribute.go index b9204a8223e..4406c67e192 100644 --- a/service/ec2/api_op_ModifySnapshotAttribute.go +++ b/service/ec2/api_op_ModifySnapshotAttribute.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for ModifySnapshotAttribute. type ModifySnapshotAttributeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_ModifyVolumeAttribute.go b/service/ec2/api_op_ModifyVolumeAttribute.go index 69b0b49d5af..ab495bfe193 100644 --- a/service/ec2/api_op_ModifyVolumeAttribute.go +++ b/service/ec2/api_op_ModifyVolumeAttribute.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for ModifyVolumeAttribute. type ModifyVolumeAttributeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_ResetSnapshotAttribute.go b/service/ec2/api_op_ResetSnapshotAttribute.go index 59d7cab1eef..46e2eabfa17 100644 --- a/service/ec2/api_op_ResetSnapshotAttribute.go +++ b/service/ec2/api_op_ResetSnapshotAttribute.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/ec2query" ) -// Contains the parameters for ResetSnapshotAttribute. type ResetSnapshotAttributeInput struct { _ struct{} `type:"structure"` diff --git a/service/ec2/api_op_RunInstances.go b/service/ec2/api_op_RunInstances.go index cd26adc4afa..8ce4e82de0a 100644 --- a/service/ec2/api_op_RunInstances.go +++ b/service/ec2/api_op_RunInstances.go @@ -163,6 +163,10 @@ type RunInstancesInput struct { // MaxCount is a required field MaxCount *int64 `type:"integer" required:"true"` + // The metadata options for the instance. For more information, see Instance + // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` + // The minimum number of instances to launch. If you specify a minimum that // is more instances than Amazon EC2 can launch in the target Availability Zone, // Amazon EC2 launches no instances. diff --git a/service/ec2/api_types.go b/service/ec2/api_types.go index ee4b64d3ca7..a19aaacad8c 100644 --- a/service/ec2/api_types.go +++ b/service/ec2/api_types.go @@ -1154,6 +1154,9 @@ type CustomerGateway struct { // The ID of the customer gateway. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` + // The name of customer gateway device. + DeviceName *string `locationName:"deviceName" type:"string"` + // The Internet-routable IP address of the customer gateway's outside interface. IpAddress *string `locationName:"ipAddress" type:"string"` @@ -1304,6 +1307,55 @@ func (s DeleteQueuedReservedInstancesError) String() string { return awsutil.Prettify(s) } +// Describes fast snapshot restores for a snapshot. +type DescribeFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. 
+ DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The alias of the snapshot owner. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that owns the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores. + State FastSnapshotRestoreStateCode `locationName:"state" type:"string" enum:"true"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. + // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. + StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s DescribeFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + // Describes the instances that could not be launched by the fleet. type DescribeFleetError struct { _ struct{} `type:"structure"` @@ -1423,6 +1475,105 @@ func (s DirectoryServiceAuthenticationRequest) String() string { return awsutil.Prettify(s) } +// Contains information about the errors that occurred when disabling fast snapshot +// restores. +type DisableFastSnapshotRestoreErrorItem struct { + _ struct{} `type:"structure"` + + // The errors. + FastSnapshotRestoreStateErrors []DisableFastSnapshotRestoreStateErrorItem `locationName:"fastSnapshotRestoreStateErrorSet" locationNameList:"item" type:"list"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreErrorItem) String() string { + return awsutil.Prettify(s) +} + +// Describes an error that occurred when disabling fast snapshot restores. +type DisableFastSnapshotRestoreStateError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreStateError) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an error that occurred when disabling fast snapshot +// restores. +type DisableFastSnapshotRestoreStateErrorItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The error. + Error *DisableFastSnapshotRestoreStateError `locationName:"error" type:"structure"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreStateErrorItem) String() string { + return awsutil.Prettify(s) +} + +// Describes fast snapshot restores that were successfully disabled. 
+type DisableFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. + DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The alias of the snapshot owner. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that owns the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores for the snapshot. + State FastSnapshotRestoreStateCode `locationName:"state" type:"string" enum:"true"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. + // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. + StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s DisableFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + // Describes a disk image. type DiskImage struct { _ struct{} `type:"structure"` @@ -1886,6 +2037,105 @@ func (s ElasticInferenceAcceleratorAssociation) String() string { return awsutil.Prettify(s) } +// Contains information about the errors that occurred when enabling fast snapshot +// restores. +type EnableFastSnapshotRestoreErrorItem struct { + _ struct{} `type:"structure"` + + // The errors. + FastSnapshotRestoreStateErrors []EnableFastSnapshotRestoreStateErrorItem `locationName:"fastSnapshotRestoreStateErrorSet" locationNameList:"item" type:"list"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreErrorItem) String() string { + return awsutil.Prettify(s) +} + +// Describes an error that occurred when enabling fast snapshot restores. +type EnableFastSnapshotRestoreStateError struct { + _ struct{} `type:"structure"` + + // The error code. + Code *string `locationName:"code" type:"string"` + + // The error message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreStateError) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an error that occurred when enabling fast snapshot +// restores. +type EnableFastSnapshotRestoreStateErrorItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The error. 
+ Error *EnableFastSnapshotRestoreStateError `locationName:"error" type:"structure"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreStateErrorItem) String() string { + return awsutil.Prettify(s) +} + +// Describes fast snapshot restores that were successfully enabled. +type EnableFastSnapshotRestoreSuccessItem struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The time at which fast snapshot restores entered the disabled state. + DisabledTime *time.Time `locationName:"disabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the disabling state. + DisablingTime *time.Time `locationName:"disablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabled state. + EnabledTime *time.Time `locationName:"enabledTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the enabling state. + EnablingTime *time.Time `locationName:"enablingTime" type:"timestamp"` + + // The time at which fast snapshot restores entered the optimizing state. + OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` + + // The alias of the snapshot owner. + OwnerAlias *string `locationName:"ownerAlias" type:"string"` + + // The ID of the AWS account that owns the snapshot. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The ID of the snapshot. + SnapshotId *string `locationName:"snapshotId" type:"string"` + + // The state of fast snapshot restores. + State FastSnapshotRestoreStateCode `locationName:"state" type:"string" enum:"true"` + + // The reason for the state transition. The possible values are as follows: + // + // * Client.UserInitiated - The state successfully transitioned to enabling + // or disabling. + // + // * Client.UserInitiated - Lifecycle state transition - The state successfully + // transitioned to optimizing, enabled, or disabled. + StateTransitionReason *string `locationName:"stateTransitionReason" type:"string"` +} + +// String returns the string representation +func (s EnableFastSnapshotRestoreSuccessItem) String() string { + return awsutil.Prettify(s) +} + // Describes an EC2 Fleet or Spot Fleet event. type EventInformation struct { _ struct{} `type:"structure"` @@ -3420,6 +3670,9 @@ type Instance struct { // The license configurations. Licenses []LicenseConfiguration `locationName:"licenseSet" locationNameList:"item" type:"list"` + // The metadata options for the instance. + MetadataOptions *InstanceMetadataOptionsResponse `locationName:"metadataOptions" type:"structure"` + // The monitoring for the instance. Monitoring *Monitoring `locationName:"monitoring" type:"structure"` @@ -3678,6 +3931,94 @@ func (s InstanceMarketOptionsRequest) String() string { return awsutil.Prettify(s) } +// The metadata options for the instance. +type InstanceMetadataOptionsRequest struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint InstanceMetadataEndpointState `type:"string" enum:"true"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. 
+ // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens HttpTokensState `type:"string" enum:"true"` +} + +// String returns the string representation +func (s InstanceMetadataOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// The metadata options for the instance. +type InstanceMetadataOptionsResponse struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint InstanceMetadataEndpointState `locationName:"httpEndpoint" type:"string" enum:"true"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credential + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens HttpTokensState `locationName:"httpTokens" type:"string" enum:"true"` + + // The state of the metadata option changes. + // + // pending - The metadata options are being updated and the instance is not + // ready to process metadata traffic with the new selection. + // + // applied - The metadata options have been successfully applied on the instance. + State InstanceMetadataOptionsState `locationName:"state" type:"string" enum:"true"` +} + +// String returns the string representation +func (s InstanceMetadataOptionsResponse) String() string { + return awsutil.Prettify(s) +} + // Describes the monitoring of an instance. type InstanceMonitoring struct { _ struct{} `type:"structure"` @@ -10531,6 +10872,9 @@ type Volume struct { // Indicates whether the volume is encrypted. 
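// A minimal usage sketch of the new ModifyInstanceMetadataOptions operation added
// in this release (its Request method appears in the ec2iface changes below):
// requiring IMDSv2 session tokens on a running instance. The input field names
// (InstanceId, HttpTokens, HttpPutResponseHopLimit) and the enum constant
// HttpTokensStateRequired are assumptions based on the metadata options documented
// above; they are not shown in this hunk.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ec2.New(cfg)

	// Require signed tokens (IMDSv2) and keep the default hop limit of 1.
	req := svc.ModifyInstanceMetadataOptionsRequest(&ec2.ModifyInstanceMetadataOptionsInput{
		InstanceId:              aws.String("i-0123456789abcdef0"), // placeholder instance ID
		HttpTokens:              ec2.HttpTokensStateRequired,       // assumed enum constant name
		HttpPutResponseHopLimit: aws.Int64(1),
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		panic("ModifyInstanceMetadataOptions failed, " + err.Error())
	}
	fmt.Println(resp)
}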
Encrypted *bool `locationName:"encrypted" type:"boolean"` + // Indicates whether the volume was created using fast snapshot restore. + FastRestored *bool `locationName:"fastRestored" type:"boolean"` + // The number of I/O operations per second (IOPS) that the volume supports. // For Provisioned IOPS SSD volumes, this represents the number of IOPS that // are provisioned for the volume. For General Purpose SSD volumes, this represents diff --git a/service/ec2/api_waiters.go b/service/ec2/api_waiters.go index 83742934dd3..57b7e5c253c 100644 --- a/service/ec2/api_waiters.go +++ b/service/ec2/api_waiters.go @@ -826,6 +826,50 @@ func (c *Client) WaitUntilPasswordDataAvailable(ctx context.Context, input *GetP return w.Wait(ctx) } +// WaitUntilSecurityGroupExists uses the Amazon EC2 API operation +// DescribeSecurityGroups to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilSecurityGroupExists(ctx context.Context, input *DescribeSecurityGroupsInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilSecurityGroupExists", + MaxAttempts: 6, + Delay: aws.ConstantWaiterDelay(5 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "length(SecurityGroups[].GroupId) > `0`", + Expected: true, + }, + { + State: aws.RetryWaiterState, + Matcher: aws.ErrorWaiterMatch, + Expected: "InvalidGroupNotFound", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *DescribeSecurityGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.DescribeSecurityGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} + // WaitUntilSnapshotCompleted uses the Amazon EC2 API operation // DescribeSnapshots to wait for a condition to be met before returning. 
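// A minimal usage sketch of the WaitUntilSecurityGroupExists waiter defined above:
// block until a newly created security group becomes visible to DescribeSecurityGroups.
// The DescribeSecurityGroupsInput.GroupIds field name is an assumption (it is not
// shown in this hunk), and the group ID below is a placeholder.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ec2.New(cfg)

	// The waiter retries up to 6 times, 5 seconds apart, so bound the call
	// with a context timeout comfortably above that window.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	err = svc.WaitUntilSecurityGroupExists(ctx, &ec2.DescribeSecurityGroupsInput{
		GroupIds: []string{"sg-0123456789abcdef0"},
	})
	if err != nil {
		panic("security group never became visible, " + err.Error())
	}
	fmt.Println("security group exists")
}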
// If the condition is not met within the max attempt window, an error will diff --git a/service/ec2/ec2iface/interface.go b/service/ec2/ec2iface/interface.go index d8061bbeccb..526f49c7c34 100644 --- a/service/ec2/ec2iface/interface.go +++ b/service/ec2/ec2iface/interface.go @@ -370,6 +370,8 @@ type ClientAPI interface { DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) ec2.DescribeExportTasksRequest + DescribeFastSnapshotRestoresRequest(*ec2.DescribeFastSnapshotRestoresInput) ec2.DescribeFastSnapshotRestoresRequest + DescribeFleetHistoryRequest(*ec2.DescribeFleetHistoryInput) ec2.DescribeFleetHistoryRequest DescribeFleetInstancesRequest(*ec2.DescribeFleetInstancesInput) ec2.DescribeFleetInstancesRequest @@ -540,6 +542,8 @@ type ClientAPI interface { DisableEbsEncryptionByDefaultRequest(*ec2.DisableEbsEncryptionByDefaultInput) ec2.DisableEbsEncryptionByDefaultRequest + DisableFastSnapshotRestoresRequest(*ec2.DisableFastSnapshotRestoresInput) ec2.DisableFastSnapshotRestoresRequest + DisableTransitGatewayRouteTablePropagationRequest(*ec2.DisableTransitGatewayRouteTablePropagationInput) ec2.DisableTransitGatewayRouteTablePropagationRequest DisableVgwRoutePropagationRequest(*ec2.DisableVgwRoutePropagationInput) ec2.DisableVgwRoutePropagationRequest @@ -564,6 +568,8 @@ type ClientAPI interface { EnableEbsEncryptionByDefaultRequest(*ec2.EnableEbsEncryptionByDefaultInput) ec2.EnableEbsEncryptionByDefaultRequest + EnableFastSnapshotRestoresRequest(*ec2.EnableFastSnapshotRestoresInput) ec2.EnableFastSnapshotRestoresRequest + EnableTransitGatewayRouteTablePropagationRequest(*ec2.EnableTransitGatewayRouteTablePropagationInput) ec2.EnableTransitGatewayRouteTablePropagationRequest EnableVgwRoutePropagationRequest(*ec2.EnableVgwRoutePropagationInput) ec2.EnableVgwRoutePropagationRequest @@ -644,6 +650,8 @@ type ClientAPI interface { ModifyInstanceEventStartTimeRequest(*ec2.ModifyInstanceEventStartTimeInput) ec2.ModifyInstanceEventStartTimeRequest + ModifyInstanceMetadataOptionsRequest(*ec2.ModifyInstanceMetadataOptionsInput) ec2.ModifyInstanceMetadataOptionsRequest + ModifyInstancePlacementRequest(*ec2.ModifyInstancePlacementInput) ec2.ModifyInstancePlacementRequest ModifyLaunchTemplateRequest(*ec2.ModifyLaunchTemplateInput) ec2.ModifyLaunchTemplateRequest @@ -818,6 +826,8 @@ type ClientAPI interface { WaitUntilPasswordDataAvailable(context.Context, *ec2.GetPasswordDataInput, ...aws.WaiterOption) error + WaitUntilSecurityGroupExists(context.Context, *ec2.DescribeSecurityGroupsInput, ...aws.WaiterOption) error + WaitUntilSnapshotCompleted(context.Context, *ec2.DescribeSnapshotsInput, ...aws.WaiterOption) error WaitUntilSpotInstanceRequestFulfilled(context.Context, *ec2.DescribeSpotInstanceRequestsInput, ...aws.WaiterOption) error diff --git a/service/ecs/api_op_ListAccountSettings.go b/service/ecs/api_op_ListAccountSettings.go index 122b75aaa1c..081d334c421 100644 --- a/service/ecs/api_op_ListAccountSettings.go +++ b/service/ecs/api_op_ListAccountSettings.go @@ -30,10 +30,10 @@ type ListAccountSettingsInput struct { // The resource name you want to list the account settings for. Name SettingName `locationName:"name" type:"string" enum:"true"` - // The nextToken value returned from a previous paginated ListAccountSettings - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. 
+ // The nextToken value returned from a ListAccountSettings request indicating + // that more results are available to fulfill the request and further calls + // will be needed. If maxResults was provided, it is possible the number of + // results to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListAttributes.go b/service/ecs/api_op_ListAttributes.go index 8cb7a637a37..f0cf93e49f5 100644 --- a/service/ecs/api_op_ListAttributes.go +++ b/service/ecs/api_op_ListAttributes.go @@ -32,10 +32,10 @@ type ListAttributesInput struct { // results and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListAttributes request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. + // The nextToken value returned from a ListAttributes request indicating that + // more results are available to fulfill the request and further calls will + // be needed. If maxResults was provided, it is possible the number of results + // to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListClusters.go b/service/ecs/api_op_ListClusters.go index dda4327b829..9a0eff0cfce 100644 --- a/service/ecs/api_op_ListClusters.go +++ b/service/ecs/api_op_ListClusters.go @@ -21,10 +21,10 @@ type ListClustersInput struct { // and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListClusters request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. + // The nextToken value returned from a ListClusters request indicating that + // more results are available to fulfill the request and further calls will + // be needed. If maxResults was provided, it is possible the number of results + // to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListContainerInstances.go b/service/ecs/api_op_ListContainerInstances.go index 2d66c589a48..3c4262c1f37 100644 --- a/service/ecs/api_op_ListContainerInstances.go +++ b/service/ecs/api_op_ListContainerInstances.go @@ -33,10 +33,10 @@ type ListContainerInstancesInput struct { // applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListContainerInstances - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. + // The nextToken value returned from a ListContainerInstances request indicating + // that more results are available to fulfill the request and further calls + // will be needed. If maxResults was provided, it is possible the number of + // results to be fewer than maxResults. 
// // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListServices.go b/service/ecs/api_op_ListServices.go index 9df1db3c118..1ff4c597486 100644 --- a/service/ecs/api_op_ListServices.go +++ b/service/ecs/api_op_ListServices.go @@ -29,10 +29,10 @@ type ListServicesInput struct { // and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListServices request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. + // The nextToken value returned from a ListServices request indicating that + // more results are available to fulfill the request and further calls will + // be needed. If maxResults was provided, it is possible the number of results + // to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListTaskDefinitionFamilies.go b/service/ecs/api_op_ListTaskDefinitionFamilies.go index 8fcf425ad4d..770eede8ea1 100644 --- a/service/ecs/api_op_ListTaskDefinitionFamilies.go +++ b/service/ecs/api_op_ListTaskDefinitionFamilies.go @@ -27,10 +27,10 @@ type ListTaskDefinitionFamiliesInput struct { // if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListTaskDefinitionFamilies - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. + // The nextToken value returned from a ListTaskDefinitionFamilies request indicating + // that more results are available to fulfill the request and further calls + // will be needed. If maxResults was provided, it is possible the number of + // results to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_ListTaskDefinitions.go b/service/ecs/api_op_ListTaskDefinitions.go index 000a340002a..e833d661dbc 100644 --- a/service/ecs/api_op_ListTaskDefinitions.go +++ b/service/ecs/api_op_ListTaskDefinitions.go @@ -26,10 +26,10 @@ type ListTaskDefinitionsInput struct { // returns up to 100 results and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListTaskDefinitions - // request where maxResults was used and the results exceeded the value of that - // parameter. Pagination continues from the end of the previous results that - // returned the nextToken value. + // The nextToken value returned from a ListTaskDefinitions request indicating + // that more results are available to fulfill the request and further calls + // will be needed. If maxResults was provided, it is possible the number of + // results to be fewer than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. 
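// The reworded nextToken documentation above describes the same pagination contract
// for all of these ECS List operations: pass the returned nextToken back until the
// service stops returning one, and do not assume a page holds exactly maxResults
// entries. A minimal sketch driving ListTaskDefinitions by hand (the generated
// request paginators used elsewhere in this diff, such as NewListNodegroupsRequestPaginator,
// follow the same token contract):
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ecs.New(cfg)

	var nextToken *string
	for {
		req := svc.ListTaskDefinitionsRequest(&ecs.ListTaskDefinitionsInput{
			MaxResults: aws.Int64(10),
			NextToken:  nextToken,
		})
		resp, err := req.Send(context.Background())
		if err != nil {
			panic("ListTaskDefinitions failed, " + err.Error())
		}
		fmt.Println(resp) // each page may hold fewer than maxResults entries

		if resp.NextToken == nil {
			break // no more results to fetch
		}
		nextToken = resp.NextToken
	}
}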
diff --git a/service/ecs/api_op_ListTasks.go b/service/ecs/api_op_ListTasks.go index cdf58563ec5..386b4ba6f01 100644 --- a/service/ecs/api_op_ListTasks.go +++ b/service/ecs/api_op_ListTasks.go @@ -50,10 +50,10 @@ type ListTasksInput struct { // value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The nextToken value returned from a previous paginated ListTasks request - // where maxResults was used and the results exceeded the value of that parameter. - // Pagination continues from the end of the previous results that returned the - // nextToken value. + // The nextToken value returned from a ListTasks request indicating that more + // results are available to fulfill the request and further calls will be needed. + // If maxResults was provided, it is possible the number of results to be fewer + // than maxResults. // // This token should be treated as an opaque identifier that is only used to // retrieve the next items in a list and not for other programmatic purposes. diff --git a/service/ecs/api_op_RunTask.go b/service/ecs/api_op_RunTask.go index d16318dd185..bce4edcbdda 100644 --- a/service/ecs/api_op_RunTask.go +++ b/service/ecs/api_op_RunTask.go @@ -80,6 +80,9 @@ type RunTaskInput struct { // a task. PropagateTags PropagateTags `locationName:"propagateTags" type:"string" enum:"true"` + // The reference ID to use for the task. + ReferenceId *string `locationName:"referenceId" type:"string"` + // An optional tag specified when a task is started. For example, if you automatically // trigger a task to run a batch process job, you could apply a unique identifier // for that job to your task with the startedBy parameter. You can then identify diff --git a/service/ecs/api_op_StartTask.go b/service/ecs/api_op_StartTask.go index c5cc7dbd597..d14b24952f7 100644 --- a/service/ecs/api_op_StartTask.go +++ b/service/ecs/api_op_StartTask.go @@ -54,6 +54,9 @@ type StartTaskInput struct { // to the task. If no value is specified, the tags are not propagated. PropagateTags PropagateTags `locationName:"propagateTags" type:"string" enum:"true"` + // The reference ID to use for the task. + ReferenceId *string `locationName:"referenceId" type:"string"` + // An optional tag specified when a task is started. For example, if you automatically // trigger a task to run a batch process job, you could apply a unique identifier // for that job to your task with the startedBy parameter. You can then identify diff --git a/service/ecs/api_types.go b/service/ecs/api_types.go index 44a4ed12d59..68b16acf984 100644 --- a/service/ecs/api_types.go +++ b/service/ecs/api_types.go @@ -433,9 +433,8 @@ type ContainerDefinition struct { // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. // - // This parameter is available for tasks using the Fargate launch type in the - // Ohio (us-east-2) region only and the task or service requires platform version - // 1.3.0 or later. + // For tasks using the Fargate launch type, the task or service requires platform + // version 1.3.0 or later. DependsOn []ContainerDependency `locationName:"dependsOn" type:"list"` // When this parameter is true, networking is disabled within the container. @@ -801,15 +800,14 @@ type ContainerDefinition struct { // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. 
// - // This parameter is available for tasks using the Fargate launch type in the - // Ohio (us-east-2) region only and the task or service requires platform version - // 1.3.0 or later. + // For tasks using the Fargate launch type, the task or service requires platform + // version 1.3.0 or later. StartTimeout *int64 `locationName:"startTimeout" type:"integer"` // Time duration (in seconds) to wait before the container is forcefully killed - // if it doesn't exit normally on its own. For tasks using the Fargate launch - // type, the max stopTimeout value is 2 minutes. This parameter is available - // for tasks using the Fargate launch type in the Ohio (us-east-2) region only + // if it doesn't exit normally on its own. + // + // For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes // and the task or service requires platform version 1.3.0 or later. // // For tasks using the EC2 launch type, the stop timeout value for the container @@ -1526,6 +1524,9 @@ type Failure struct { // The Amazon Resource Name (ARN) of the failed resource. Arn *string `locationName:"arn" type:"string"` + // The details of the failure. + Detail *string `locationName:"detail" type:"string"` + // The reason for the failure. Reason *string `locationName:"reason" type:"string"` } @@ -2004,20 +2005,16 @@ type LogConfiguration struct { // parameter are log drivers that the Amazon ECS container agent can communicate // with by default. // - // For tasks using the Fargate launch type, the supported log drivers are awslogs, - // splunk, and awsfirelens. + // For tasks using the Fargate launch type, the supported log drivers are awslogs + // and splunk. // // For tasks using the EC2 launch type, the supported log drivers are awslogs, - // fluentd, gelf, json-file, journald, logentries, syslog, splunk, and awsfirelens. + // fluentd, gelf, json-file, journald, logentries, syslog, and splunk. // // For more information about using the awslogs log driver, see Using the awslogs // Log Driver (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) // in the Amazon Elastic Container Service Developer Guide. // - // For more information about using the awsfirelens log driver, see Custom Log - // Routing (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) - // in the Amazon Elastic Container Service Developer Guide. - // // If you have a custom driver that is not listed above that you would like // to work with the Amazon ECS container agent, you can fork the Amazon ECS // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) @@ -2938,6 +2935,12 @@ type Task struct { // awsvpc network mode. Attachments []Attachment `locationName:"attachments" type:"list"` + // The attributes of the task + Attributes []Attribute `locationName:"attributes" type:"list"` + + // The availability zone of the task. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The ARN of the cluster that hosts the task. ClusterArn *string `locationName:"clusterArn" type:"string"` @@ -3189,6 +3192,9 @@ type TaskDefinition struct { // ECS gives sequential revision numbers to each task definition that you add. Family *string `locationName:"family" type:"string"` + // The Elastic Inference accelerator associated with the task. + InferenceAccelerators []InferenceAccelerator `locationName:"inferenceAccelerators" type:"list"` + // The IPC resource namespace to use for the containers in the task. 
The valid // values are host, task, or none. If host is specified, then all containers // within the tasks that specified the host IPC mode on the same container instance @@ -3397,6 +3403,9 @@ type TaskOverride struct { // One or more container overrides sent to a task. ContainerOverrides []ContainerOverride `locationName:"containerOverrides" type:"list"` + // The cpu override for the task. + Cpu *string `locationName:"cpu" type:"string"` + // The Amazon Resource Name (ARN) of the task execution role that the Amazon // ECS container agent and the Docker daemon can assume. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` @@ -3404,6 +3413,9 @@ type TaskOverride struct { // The Elastic Inference accelerator override for the task. InferenceAcceleratorOverrides []InferenceAcceleratorOverride `locationName:"inferenceAcceleratorOverrides" type:"list"` + // The memory override for the task. + Memory *string `locationName:"memory" type:"string"` + // The Amazon Resource Name (ARN) of the IAM role that containers in this task // can assume. All containers in this task are granted the permissions that // are specified in this role. diff --git a/service/eks/api_enums.go b/service/eks/api_enums.go index 84b9ff218e1..435e0437fb6 100644 --- a/service/eks/api_enums.go +++ b/service/eks/api_enums.go @@ -2,6 +2,23 @@ package eks +type AMITypes string + +// Enum values for AMITypes +const ( + AMITypesAl2X8664 AMITypes = "AL2_x86_64" + AMITypesAl2X8664Gpu AMITypes = "AL2_x86_64_GPU" +) + +func (enum AMITypes) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AMITypes) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ClusterStatus string // Enum values for ClusterStatus @@ -10,6 +27,7 @@ const ( ClusterStatusActive ClusterStatus = "ACTIVE" ClusterStatusDeleting ClusterStatus = "DELETING" ClusterStatusFailed ClusterStatus = "FAILED" + ClusterStatusUpdating ClusterStatus = "UPDATING" ) func (enum ClusterStatus) MarshalValue() (string, error) { @@ -25,14 +43,17 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeSubnetNotFound ErrorCode = "SubnetNotFound" - ErrorCodeSecurityGroupNotFound ErrorCode = "SecurityGroupNotFound" - ErrorCodeEniLimitReached ErrorCode = "EniLimitReached" - ErrorCodeIpNotAvailable ErrorCode = "IpNotAvailable" - ErrorCodeAccessDenied ErrorCode = "AccessDenied" - ErrorCodeOperationNotPermitted ErrorCode = "OperationNotPermitted" - ErrorCodeVpcIdNotFound ErrorCode = "VpcIdNotFound" - ErrorCodeUnknown ErrorCode = "Unknown" + ErrorCodeSubnetNotFound ErrorCode = "SubnetNotFound" + ErrorCodeSecurityGroupNotFound ErrorCode = "SecurityGroupNotFound" + ErrorCodeEniLimitReached ErrorCode = "EniLimitReached" + ErrorCodeIpNotAvailable ErrorCode = "IpNotAvailable" + ErrorCodeAccessDenied ErrorCode = "AccessDenied" + ErrorCodeOperationNotPermitted ErrorCode = "OperationNotPermitted" + ErrorCodeVpcIdNotFound ErrorCode = "VpcIdNotFound" + ErrorCodeUnknown ErrorCode = "Unknown" + ErrorCodeNodeCreationFailure ErrorCode = "NodeCreationFailure" + ErrorCodePodEvictionFailure ErrorCode = "PodEvictionFailure" + ErrorCodeInsufficientFreeAddresses ErrorCode = "InsufficientFreeAddresses" ) func (enum ErrorCode) MarshalValue() (string, error) { @@ -64,6 +85,55 @@ func (enum LogType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type NodegroupIssueCode string + +// Enum values for NodegroupIssueCode +const ( + NodegroupIssueCodeAutoScalingGroupNotFound 
NodegroupIssueCode = "AutoScalingGroupNotFound" + NodegroupIssueCodeEc2securityGroupNotFound NodegroupIssueCode = "Ec2SecurityGroupNotFound" + NodegroupIssueCodeEc2securityGroupDeletionFailure NodegroupIssueCode = "Ec2SecurityGroupDeletionFailure" + NodegroupIssueCodeEc2launchTemplateNotFound NodegroupIssueCode = "Ec2LaunchTemplateNotFound" + NodegroupIssueCodeEc2launchTemplateVersionMismatch NodegroupIssueCode = "Ec2LaunchTemplateVersionMismatch" + NodegroupIssueCodeIamInstanceProfileNotFound NodegroupIssueCode = "IamInstanceProfileNotFound" + NodegroupIssueCodeIamNodeRoleNotFound NodegroupIssueCode = "IamNodeRoleNotFound" + NodegroupIssueCodeAsgInstanceLaunchFailures NodegroupIssueCode = "AsgInstanceLaunchFailures" + NodegroupIssueCodeInstanceLimitExceeded NodegroupIssueCode = "InstanceLimitExceeded" + NodegroupIssueCodeInsufficientFreeAddresses NodegroupIssueCode = "InsufficientFreeAddresses" + NodegroupIssueCodeAccessDenied NodegroupIssueCode = "AccessDenied" + NodegroupIssueCodeInternalFailure NodegroupIssueCode = "InternalFailure" +) + +func (enum NodegroupIssueCode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum NodegroupIssueCode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type NodegroupStatus string + +// Enum values for NodegroupStatus +const ( + NodegroupStatusCreating NodegroupStatus = "CREATING" + NodegroupStatusActive NodegroupStatus = "ACTIVE" + NodegroupStatusUpdating NodegroupStatus = "UPDATING" + NodegroupStatusDeleting NodegroupStatus = "DELETING" + NodegroupStatusCreateFailed NodegroupStatus = "CREATE_FAILED" + NodegroupStatusDeleteFailed NodegroupStatus = "DELETE_FAILED" + NodegroupStatusDegraded NodegroupStatus = "DEGRADED" +) + +func (enum NodegroupStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum NodegroupStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type UpdateParamType string // Enum values for UpdateParamType @@ -73,6 +143,12 @@ const ( UpdateParamTypeEndpointPrivateAccess UpdateParamType = "EndpointPrivateAccess" UpdateParamTypeEndpointPublicAccess UpdateParamType = "EndpointPublicAccess" UpdateParamTypeClusterLogging UpdateParamType = "ClusterLogging" + UpdateParamTypeDesiredSize UpdateParamType = "DesiredSize" + UpdateParamTypeLabelsToAdd UpdateParamType = "LabelsToAdd" + UpdateParamTypeLabelsToRemove UpdateParamType = "LabelsToRemove" + UpdateParamTypeMaxSize UpdateParamType = "MaxSize" + UpdateParamTypeMinSize UpdateParamType = "MinSize" + UpdateParamTypeReleaseVersion UpdateParamType = "ReleaseVersion" ) func (enum UpdateParamType) MarshalValue() (string, error) { @@ -110,6 +186,7 @@ const ( UpdateTypeVersionUpdate UpdateType = "VersionUpdate" UpdateTypeEndpointAccessUpdate UpdateType = "EndpointAccessUpdate" UpdateTypeLoggingUpdate UpdateType = "LoggingUpdate" + UpdateTypeConfigUpdate UpdateType = "ConfigUpdate" ) func (enum UpdateType) MarshalValue() (string, error) { diff --git a/service/eks/api_errors.go b/service/eks/api_errors.go index 89a5e2fb8bf..be0e3718379 100644 --- a/service/eks/api_errors.go +++ b/service/eks/api_errors.go @@ -56,7 +56,8 @@ const ( // "ResourceNotFoundException". // // The specified resource could not be found. You can view your available clusters - // with ListClusters. Amazon EKS clusters are Region-specific. + // with ListClusters. You can view your available managed node groups with ListNodegroups. 
+ // Amazon EKS clusters and node groups are Region-specific. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeServerException for service response error code diff --git a/service/eks/api_examples_test.go b/service/eks/api_examples_test.go index 136221eaa28..16a88885e3c 100644 --- a/service/eks/api_examples_test.go +++ b/service/eks/api_examples_test.go @@ -208,3 +208,40 @@ func ExampleClient_ListClustersRequest_shared00() { fmt.Println(result) } + +// To list tags for a cluster +// +// This example lists all of the tags for the `beta` cluster. +func ExampleClient_ListTagsForResourceRequest_shared00() { + cfg, err := external.LoadDefaultAWSConfig() + if err != nil { + panic("failed to load config, " + err.Error()) + } + + svc := eks.New(cfg) + input := &eks.ListTagsForResourceInput{ + ResourceArn: aws.String("arn:aws:eks:us-west-2:012345678910:cluster/beta"), + } + + req := svc.ListTagsForResourceRequest(input) + result, err := req.Send(context.Background()) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case eks.ErrCodeBadRequestException: + fmt.Println(eks.ErrCodeBadRequestException, aerr.Error()) + case eks.ErrCodeNotFoundException: + fmt.Println(eks.ErrCodeNotFoundException, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) +} diff --git a/service/eks/api_op_CreateNodegroup.go b/service/eks/api_op_CreateNodegroup.go new file mode 100644 index 00000000000..6e929f74b29 --- /dev/null +++ b/service/eks/api_op_CreateNodegroup.go @@ -0,0 +1,352 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateNodegroupInput struct { + _ struct{} `type:"structure"` + + // The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU + // AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support; + // non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon + // EKS-optimized Linux AMI. + AmiType AMITypes `locationName:"amiType" type:"string" enum:"true"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + + // The name of the cluster to create the node group in. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The root device disk size (in GiB) for your node group instances. The default + // disk size is 20 GiB. + DiskSize *int64 `locationName:"diskSize" type:"integer"` + + // The instance type to use for your node group. Currently, you can specify + // a single instance type for a node group. The default value for this parameter + // is t3.medium. If you choose a GPU instance type, be sure to specify the AL2_x86_64_GPU + // with the amiType parameter. + InstanceTypes []string `locationName:"instanceTypes" type:"list"` + + // The Kubernetes labels to be applied to the nodes in the node group when they + // are created. + Labels map[string]string `locationName:"labels" type:"map"` + + // The IAM role associated with your node group. 
The Amazon EKS worker node + // kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive + // permissions for these API calls through an IAM instance profile and associated + // policies. Before you can launch worker nodes and register them into a cluster, + // you must create an IAM role for those worker nodes to use when they are launched. + // For more information, see Amazon EKS Worker Node IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) + // in the Amazon EKS User Guide . + // + // NodeRole is a required field + NodeRole *string `locationName:"nodeRole" type:"string" required:"true"` + + // The unique name to give your node group. + // + // NodegroupName is a required field + NodegroupName *string `locationName:"nodegroupName" type:"string" required:"true"` + + // The AMI version of the Amazon EKS-optimized AMI to use with your node group. + // By default, the latest available AMI version for the node group's current + // Kubernetes version is used. For more information, see Amazon EKS-Optimized + // Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) + // in the Amazon EKS User Guide. + ReleaseVersion *string `locationName:"releaseVersion" type:"string"` + + // The remote access (SSH) configuration to use with your node group. + RemoteAccess *RemoteAccessConfig `locationName:"remoteAccess" type:"structure"` + + // The scaling configuration details for the AutoScaling group that is created + // for your node group. + ScalingConfig *NodegroupScalingConfig `locationName:"scalingConfig" type:"structure"` + + // The subnets to use for the AutoScaling group that is created for your node + // group. These subnets must have the tag key kubernetes.io/cluster/CLUSTER_NAME + // with a value of shared, where CLUSTER_NAME is replaced with the name of your + // cluster. + // + // Subnets is a required field + Subnets []string `locationName:"subnets" type:"list" required:"true"` + + // The metadata to apply to the node group to assist with categorization and + // organization. Each tag consists of a key and an optional value, both of which + // you define. Node group tags do not propagate to any other resources associated + // with the node group, such as the Amazon EC2 instances or subnets. + Tags map[string]string `locationName:"tags" min:"1" type:"map"` + + // The Kubernetes version to use for your managed nodes. By default, the Kubernetes + // version of the cluster is used, and this is the only accepted specified value. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s CreateNodegroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateNodegroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateNodegroupInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + + if s.NodeRole == nil { + invalidParams.Add(aws.NewErrParamRequired("NodeRole")) + } + + if s.NodegroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("NodegroupName")) + } + + if s.Subnets == nil { + invalidParams.Add(aws.NewErrParamRequired("Subnets")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + if s.ScalingConfig != nil { + if err := s.ScalingConfig.Validate(); err != nil { + invalidParams.AddNested("ScalingConfig", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateNodegroupInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.AmiType) > 0 { + v := s.AmiType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "amiType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DiskSize != nil { + v := *s.DiskSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "diskSize", protocol.Int64Value(v), metadata) + } + if s.InstanceTypes != nil { + v := s.InstanceTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "instanceTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Labels != nil { + v := s.Labels + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "labels", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.NodeRole != nil { + v := *s.NodeRole + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nodeRole", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReleaseVersion != nil { + v := *s.ReleaseVersion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "releaseVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RemoteAccess != nil { + v := s.RemoteAccess + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "remoteAccess", v, metadata) + } + if s.ScalingConfig != nil { + v := s.ScalingConfig + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "scalingConfig", v, metadata) + } + if s.Subnets != nil { + v := s.Subnets + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "subnets", metadata) + ls0.Start() + for _, v1 := range v { + 
ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.Version != nil { + v := *s.Version + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "version", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateNodegroupOutput struct { + _ struct{} `type:"structure"` + + // The full description of your new node group. + Nodegroup *Nodegroup `locationName:"nodegroup" type:"structure"` +} + +// String returns the string representation +func (s CreateNodegroupOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateNodegroupOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Nodegroup != nil { + v := s.Nodegroup + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "nodegroup", v, metadata) + } + return nil +} + +const opCreateNodegroup = "CreateNodegroup" + +// CreateNodegroupRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Creates a managed worker node group for an Amazon EKS cluster. You can only +// create a node group for your cluster that is equal to the current Kubernetes +// version for the cluster. All node groups are created with the latest AMI +// release version for the respective minor Kubernetes version of the cluster. +// +// An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and +// associated Amazon EC2 instances that are managed by AWS for an Amazon EKS +// cluster. Each node group uses a version of the Amazon EKS-optimized Amazon +// Linux 2 AMI. For more information, see Managed Node Groups (https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) +// in the Amazon EKS User Guide. +// +// // Example sending a request using CreateNodegroupRequest. +// req := client.CreateNodegroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/CreateNodegroup +func (c *Client) CreateNodegroupRequest(input *CreateNodegroupInput) CreateNodegroupRequest { + op := &aws.Operation{ + Name: opCreateNodegroup, + HTTPMethod: "POST", + HTTPPath: "/clusters/{name}/node-groups", + } + + if input == nil { + input = &CreateNodegroupInput{} + } + + req := c.newRequest(op, input, &CreateNodegroupOutput{}) + return CreateNodegroupRequest{Request: req, Input: input, Copy: c.CreateNodegroupRequest} +} + +// CreateNodegroupRequest is the request type for the +// CreateNodegroup API operation. +type CreateNodegroupRequest struct { + *aws.Request + Input *CreateNodegroupInput + Copy func(*CreateNodegroupInput) CreateNodegroupRequest +} + +// Send marshals and sends the CreateNodegroup API request. 
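// A minimal usage sketch of the CreateNodegroup operation described above, setting
// only its required fields (ClusterName, NodegroupName, NodeRole, Subnets) plus an
// instance type; the cluster name, role ARN, subnet IDs, and instance type below
// are placeholders.
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := eks.New(cfg)

	req := svc.CreateNodegroupRequest(&eks.CreateNodegroupInput{
		ClusterName:   aws.String("prod"),
		NodegroupName: aws.String("standard-workers"),
		NodeRole:      aws.String("arn:aws:iam::012345678910:role/eksNodeRole"),
		Subnets:       []string{"subnet-0123456789abcdef0", "subnet-0fedcba9876543210"},
		InstanceTypes: []string{"t3.medium"},
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		panic("CreateNodegroup failed, " + err.Error())
	}
	fmt.Println(resp.Nodegroup)
}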
+func (r CreateNodegroupRequest) Send(ctx context.Context) (*CreateNodegroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateNodegroupResponse{ + CreateNodegroupOutput: r.Request.Data.(*CreateNodegroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateNodegroupResponse is the response type for the +// CreateNodegroup API operation. +type CreateNodegroupResponse struct { + *CreateNodegroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateNodegroup request. +func (r *CreateNodegroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_op_DeleteCluster.go b/service/eks/api_op_DeleteCluster.go index ecac4e4f0b2..8a023b060fd 100644 --- a/service/eks/api_op_DeleteCluster.go +++ b/service/eks/api_op_DeleteCluster.go @@ -88,6 +88,9 @@ const opDeleteCluster = "DeleteCluster" // For more information, see Deleting a Cluster (https://docs.aws.amazon.com/eks/latest/userguide/delete-cluster.html) // in the Amazon EKS User Guide. // +// If you have managed node groups attached to the cluster, you must delete +// them first. For more information, see DeleteNodegroup. +// // // Example sending a request using DeleteClusterRequest. // req := client.DeleteClusterRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/eks/api_op_DeleteNodegroup.go b/service/eks/api_op_DeleteNodegroup.go new file mode 100644 index 00000000000..33cb87d3c3b --- /dev/null +++ b/service/eks/api_op_DeleteNodegroup.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteNodegroupInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EKS cluster that is associated with your node group. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The name of the node group to delete. + // + // NodegroupName is a required field + NodegroupName *string `location:"uri" locationName:"nodegroupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteNodegroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNodegroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteNodegroupInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + + if s.NodegroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("NodegroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteNodegroupInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteNodegroupOutput struct { + _ struct{} `type:"structure"` + + // The full description of your deleted node group. + Nodegroup *Nodegroup `locationName:"nodegroup" type:"structure"` +} + +// String returns the string representation +func (s DeleteNodegroupOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteNodegroupOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Nodegroup != nil { + v := s.Nodegroup + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "nodegroup", v, metadata) + } + return nil +} + +const opDeleteNodegroup = "DeleteNodegroup" + +// DeleteNodegroupRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Deletes an Amazon EKS node group for a cluster. +// +// // Example sending a request using DeleteNodegroupRequest. +// req := client.DeleteNodegroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DeleteNodegroup +func (c *Client) DeleteNodegroupRequest(input *DeleteNodegroupInput) DeleteNodegroupRequest { + op := &aws.Operation{ + Name: opDeleteNodegroup, + HTTPMethod: "DELETE", + HTTPPath: "/clusters/{name}/node-groups/{nodegroupName}", + } + + if input == nil { + input = &DeleteNodegroupInput{} + } + + req := c.newRequest(op, input, &DeleteNodegroupOutput{}) + return DeleteNodegroupRequest{Request: req, Input: input, Copy: c.DeleteNodegroupRequest} +} + +// DeleteNodegroupRequest is the request type for the +// DeleteNodegroup API operation. +type DeleteNodegroupRequest struct { + *aws.Request + Input *DeleteNodegroupInput + Copy func(*DeleteNodegroupInput) DeleteNodegroupRequest +} + +// Send marshals and sends the DeleteNodegroup API request. +func (r DeleteNodegroupRequest) Send(ctx context.Context) (*DeleteNodegroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteNodegroupResponse{ + DeleteNodegroupOutput: r.Request.Data.(*DeleteNodegroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteNodegroupResponse is the response type for the +// DeleteNodegroup API operation. +type DeleteNodegroupResponse struct { + *DeleteNodegroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteNodegroup request. 
+func (r *DeleteNodegroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_op_DescribeNodegroup.go b/service/eks/api_op_DescribeNodegroup.go new file mode 100644 index 00000000000..7cc3d2fb94d --- /dev/null +++ b/service/eks/api_op_DescribeNodegroup.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeNodegroupInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EKS cluster associated with the node group. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The name of the node group to describe. + // + // NodegroupName is a required field + NodegroupName *string `location:"uri" locationName:"nodegroupName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNodegroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNodegroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeNodegroupInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + + if s.NodegroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("NodegroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeNodegroupInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeNodegroupOutput struct { + _ struct{} `type:"structure"` + + // The full description of your node group. + Nodegroup *Nodegroup `locationName:"nodegroup" type:"structure"` +} + +// String returns the string representation +func (s DescribeNodegroupOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeNodegroupOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Nodegroup != nil { + v := s.Nodegroup + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "nodegroup", v, metadata) + } + return nil +} + +const opDescribeNodegroup = "DescribeNodegroup" + +// DescribeNodegroupRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Returns descriptive information about an Amazon EKS node group. +// +// // Example sending a request using DescribeNodegroupRequest. 
+// req := client.DescribeNodegroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/DescribeNodegroup +func (c *Client) DescribeNodegroupRequest(input *DescribeNodegroupInput) DescribeNodegroupRequest { + op := &aws.Operation{ + Name: opDescribeNodegroup, + HTTPMethod: "GET", + HTTPPath: "/clusters/{name}/node-groups/{nodegroupName}", + } + + if input == nil { + input = &DescribeNodegroupInput{} + } + + req := c.newRequest(op, input, &DescribeNodegroupOutput{}) + return DescribeNodegroupRequest{Request: req, Input: input, Copy: c.DescribeNodegroupRequest} +} + +// DescribeNodegroupRequest is the request type for the +// DescribeNodegroup API operation. +type DescribeNodegroupRequest struct { + *aws.Request + Input *DescribeNodegroupInput + Copy func(*DescribeNodegroupInput) DescribeNodegroupRequest +} + +// Send marshals and sends the DescribeNodegroup API request. +func (r DescribeNodegroupRequest) Send(ctx context.Context) (*DescribeNodegroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeNodegroupResponse{ + DescribeNodegroupOutput: r.Request.Data.(*DescribeNodegroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeNodegroupResponse is the response type for the +// DescribeNodegroup API operation. +type DescribeNodegroupResponse struct { + *DescribeNodegroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeNodegroup request. +func (r *DescribeNodegroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_op_DescribeUpdate.go b/service/eks/api_op_DescribeUpdate.go index 295c30dd851..5b87de5a2bb 100644 --- a/service/eks/api_op_DescribeUpdate.go +++ b/service/eks/api_op_DescribeUpdate.go @@ -13,11 +13,14 @@ import ( type DescribeUpdateInput struct { _ struct{} `type:"structure"` - // The name of the Amazon EKS cluster to update. + // The name of the Amazon EKS cluster associated with the update. // // Name is a required field Name *string `location:"uri" locationName:"name" type:"string" required:"true"` + // The name of the Amazon EKS node group associated with the update. + NodegroupName *string `location:"querystring" locationName:"nodegroupName" type:"string"` + // The ID of the update to describe. // // UpdateId is a required field @@ -63,6 +66,12 @@ func (s DescribeUpdateInput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.PathTarget, "updateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } return nil } @@ -94,7 +103,8 @@ const opDescribeUpdate = "DescribeUpdate" // DescribeUpdateRequest returns a request value for making API operation for // Amazon Elastic Kubernetes Service. // -// Returns descriptive information about an update against your Amazon EKS cluster. +// Returns descriptive information about an update against your Amazon EKS cluster +// or associated managed node group. // // When the status of the update is Succeeded, the update is complete. 
If an // update fails, the status is Failed, and an error detail explains the reason diff --git a/service/eks/api_op_ListNodegroups.go b/service/eks/api_op_ListNodegroups.go new file mode 100644 index 00000000000..354abc3a357 --- /dev/null +++ b/service/eks/api_op_ListNodegroups.go @@ -0,0 +1,246 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListNodegroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EKS cluster that you would like to list node groups + // in. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The maximum number of node group results returned by ListNodegroups in paginated + // output. When you use this parameter, ListNodegroups returns only maxResults + // results in a single page along with a nextToken response element. You can + // see the remaining results of the initial request by sending another ListNodegroups + // request with the returned nextToken value. This value can be between 1 and + // 100. If you don't use this parameter, ListNodegroups returns up to 100 results + // and a nextToken value if applicable. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated ListNodegroups request + // where maxResults was used and the results exceeded the value of that parameter. + // Pagination continues from the end of the previous results that returned the + // nextToken value. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListNodegroupsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListNodegroupsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListNodegroupsInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListNodegroupsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListNodegroupsOutput struct { + _ struct{} `type:"structure"` + + // The nextToken value to include in a future ListNodegroups request. 
When the + // results of a ListNodegroups request exceed maxResults, you can use this value + // to retrieve the next page of results. This value is null when there are no + // more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of all of the node groups associated with the specified cluster. + Nodegroups []string `locationName:"nodegroups" type:"list"` +} + +// String returns the string representation +func (s ListNodegroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListNodegroupsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Nodegroups != nil { + v := s.Nodegroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "nodegroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +const opListNodegroups = "ListNodegroups" + +// ListNodegroupsRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Lists the Amazon EKS node groups associated with the specified cluster in +// your AWS account in the specified Region. +// +// // Example sending a request using ListNodegroupsRequest. +// req := client.ListNodegroupsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/ListNodegroups +func (c *Client) ListNodegroupsRequest(input *ListNodegroupsInput) ListNodegroupsRequest { + op := &aws.Operation{ + Name: opListNodegroups, + HTTPMethod: "GET", + HTTPPath: "/clusters/{name}/node-groups", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListNodegroupsInput{} + } + + req := c.newRequest(op, input, &ListNodegroupsOutput{}) + return ListNodegroupsRequest{Request: req, Input: input, Copy: c.ListNodegroupsRequest} +} + +// ListNodegroupsRequest is the request type for the +// ListNodegroups API operation. +type ListNodegroupsRequest struct { + *aws.Request + Input *ListNodegroupsInput + Copy func(*ListNodegroupsInput) ListNodegroupsRequest +} + +// Send marshals and sends the ListNodegroups API request. +func (r ListNodegroupsRequest) Send(ctx context.Context) (*ListNodegroupsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListNodegroupsResponse{ + ListNodegroupsOutput: r.Request.Data.(*ListNodegroupsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListNodegroupsRequestPaginator returns a paginator for ListNodegroups. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListNodegroupsRequest(input) +// p := eks.NewListNodegroupsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListNodegroupsPaginator(req ListNodegroupsRequest) ListNodegroupsPaginator { + return ListNodegroupsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListNodegroupsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListNodegroupsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListNodegroupsPaginator struct { + aws.Pager +} + +func (p *ListNodegroupsPaginator) CurrentPage() *ListNodegroupsOutput { + return p.Pager.CurrentPage().(*ListNodegroupsOutput) +} + +// ListNodegroupsResponse is the response type for the +// ListNodegroups API operation. +type ListNodegroupsResponse struct { + *ListNodegroupsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListNodegroups request. +func (r *ListNodegroupsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_op_ListTagsForResource.go b/service/eks/api_op_ListTagsForResource.go index 48936fab337..5ca64f2d7c6 100644 --- a/service/eks/api_op_ListTagsForResource.go +++ b/service/eks/api_op_ListTagsForResource.go @@ -14,7 +14,8 @@ type ListTagsForResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that identifies the resource for which to - // list the tags. Currently, the supported resources are Amazon EKS clusters. + // list the tags. Currently, the supported resources are Amazon EKS clusters + // and managed node groups. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` diff --git a/service/eks/api_op_ListUpdates.go b/service/eks/api_op_ListUpdates.go index 1a7ca515fc1..c6e397422a7 100644 --- a/service/eks/api_op_ListUpdates.go +++ b/service/eks/api_op_ListUpdates.go @@ -32,6 +32,9 @@ type ListUpdatesInput struct { // Pagination continues from the end of the previous results that returned the // nextToken value. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // The name of the Amazon EKS managed node group to list updates for. + NodegroupName *string `location:"querystring" locationName:"nodegroupName" type:"string"` } // String returns the string representation @@ -78,6 +81,12 @@ func (s ListUpdatesInput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } return nil } @@ -127,8 +136,8 @@ const opListUpdates = "ListUpdates" // ListUpdatesRequest returns a request value for making API operation for // Amazon Elastic Kubernetes Service. // -// Lists the updates associated with an Amazon EKS cluster in your AWS account, -// in the specified Region. 
+// Lists the updates associated with an Amazon EKS cluster or managed node group +// in your AWS account, in the specified Region. // // // Example sending a request using ListUpdatesRequest. // req := client.ListUpdatesRequest(params) diff --git a/service/eks/api_op_TagResource.go b/service/eks/api_op_TagResource.go index 606ffda4acc..2c45f49f4ef 100644 --- a/service/eks/api_op_TagResource.go +++ b/service/eks/api_op_TagResource.go @@ -14,7 +14,7 @@ type TagResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the resource to which to add tags. Currently, - // the supported resources are Amazon EKS clusters. + // the supported resources are Amazon EKS clusters and managed node groups. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` @@ -98,7 +98,10 @@ const opTagResource = "TagResource" // Associates the specified tags to a resource with the specified resourceArn. // If existing tags on a resource are not specified in the request parameters, // they are not changed. When a resource is deleted, the tags associated with -// that resource are deleted as well. +// that resource are deleted as well. Tags that you create for Amazon EKS resources +// do not propagate to any other resources associated with the cluster. For +// example, if you tag a cluster with this operation, that tag does not automatically +// propagate to the subnets and worker nodes associated with the cluster. // // // Example sending a request using TagResourceRequest. // req := client.TagResourceRequest(params) diff --git a/service/eks/api_op_UntagResource.go b/service/eks/api_op_UntagResource.go index 40902f142a4..6bcc9c9ea70 100644 --- a/service/eks/api_op_UntagResource.go +++ b/service/eks/api_op_UntagResource.go @@ -14,7 +14,8 @@ type UntagResourceInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the resource from which to delete tags. - // Currently, the supported resources are Amazon EKS clusters. + // Currently, the supported resources are Amazon EKS clusters and managed node + // groups. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` diff --git a/service/eks/api_op_UpdateClusterVersion.go b/service/eks/api_op_UpdateClusterVersion.go index 8fc82d98fa8..8a5babd2390 100644 --- a/service/eks/api_op_UpdateClusterVersion.go +++ b/service/eks/api_op_UpdateClusterVersion.go @@ -120,6 +120,10 @@ const opUpdateClusterVersion = "UpdateClusterVersion" // is eventually consistent). When the update is complete (either Failed or // Successful), the cluster status moves to Active. // +// If your cluster has managed node groups attached to it, all of your node +// groups’ Kubernetes versions must match the cluster’s Kubernetes version +// in order to update the cluster to a new Kubernetes version. +// // // Example sending a request using UpdateClusterVersionRequest. // req := client.UpdateClusterVersionRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/eks/api_op_UpdateNodegroupConfig.go b/service/eks/api_op_UpdateNodegroupConfig.go new file mode 100644 index 00000000000..40d9121a8f9 --- /dev/null +++ b/service/eks/api_op_UpdateNodegroupConfig.go @@ -0,0 +1,202 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
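A sketch of paging through node groups with the ListNodegroups paginator added above; note that the constructor declared above is NewListNodegroupsPaginator, even though its doc comment refers to it as NewListNodegroupsRequestPaginator. Client setup and the page size are assumptions for illustration:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

// listNodegroups collects the names of every node group in a cluster,
// following nextToken pagination via the generated paginator.
func listNodegroups(ctx context.Context, client *eks.Client, clusterName string) ([]string, error) {
	req := client.ListNodegroupsRequest(&eks.ListNodegroupsInput{
		ClusterName: aws.String(clusterName),
		MaxResults:  aws.Int64(50), // placeholder page size, 1-100 per the field docs
	})
	p := eks.NewListNodegroupsPaginator(req)

	var names []string
	for p.Next(ctx) {
		page := p.CurrentPage()
		names = append(names, page.Nodegroups...)
	}
	return names, p.Err()
}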
+ +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateNodegroupConfigInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + + // The name of the Amazon EKS cluster that the managed node group resides in. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // The Kubernetes labels to be applied to the nodes in the node group after + // the update. + Labels *UpdateLabelsPayload `locationName:"labels" type:"structure"` + + // The name of the managed node group to update. + // + // NodegroupName is a required field + NodegroupName *string `location:"uri" locationName:"nodegroupName" type:"string" required:"true"` + + // The scaling configuration details for the AutoScaling group after the update. + ScalingConfig *NodegroupScalingConfig `locationName:"scalingConfig" type:"structure"` +} + +// String returns the string representation +func (s UpdateNodegroupConfigInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateNodegroupConfigInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateNodegroupConfigInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + + if s.NodegroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("NodegroupName")) + } + if s.ScalingConfig != nil { + if err := s.ScalingConfig.Validate(); err != nil { + invalidParams.AddNested("ScalingConfig", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateNodegroupConfigInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Labels != nil { + v := s.Labels + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "labels", v, metadata) + } + if s.ScalingConfig != nil { + v := s.ScalingConfig + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "scalingConfig", v, metadata) + } + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateNodegroupConfigOutput struct { + _ struct{} `type:"structure"` + + // An object representing an asynchronous update. 
+ Update *Update `locationName:"update" type:"structure"` +} + +// String returns the string representation +func (s UpdateNodegroupConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateNodegroupConfigOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Update != nil { + v := s.Update + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "update", v, metadata) + } + return nil +} + +const opUpdateNodegroupConfig = "UpdateNodegroupConfig" + +// UpdateNodegroupConfigRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Updates an Amazon EKS managed node group configuration. Your node group continues +// to function during the update. The response output includes an update ID +// that you can use to track the status of your node group update with the DescribeUpdate +// API operation. Currently you can update the Kubernetes labels for a node +// group or the scaling configuration. +// +// // Example sending a request using UpdateNodegroupConfigRequest. +// req := client.UpdateNodegroupConfigRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateNodegroupConfig +func (c *Client) UpdateNodegroupConfigRequest(input *UpdateNodegroupConfigInput) UpdateNodegroupConfigRequest { + op := &aws.Operation{ + Name: opUpdateNodegroupConfig, + HTTPMethod: "POST", + HTTPPath: "/clusters/{name}/node-groups/{nodegroupName}/update-config", + } + + if input == nil { + input = &UpdateNodegroupConfigInput{} + } + + req := c.newRequest(op, input, &UpdateNodegroupConfigOutput{}) + return UpdateNodegroupConfigRequest{Request: req, Input: input, Copy: c.UpdateNodegroupConfigRequest} +} + +// UpdateNodegroupConfigRequest is the request type for the +// UpdateNodegroupConfig API operation. +type UpdateNodegroupConfigRequest struct { + *aws.Request + Input *UpdateNodegroupConfigInput + Copy func(*UpdateNodegroupConfigInput) UpdateNodegroupConfigRequest +} + +// Send marshals and sends the UpdateNodegroupConfig API request. +func (r UpdateNodegroupConfigRequest) Send(ctx context.Context) (*UpdateNodegroupConfigResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateNodegroupConfigResponse{ + UpdateNodegroupConfigOutput: r.Request.Data.(*UpdateNodegroupConfigOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateNodegroupConfigResponse is the response type for the +// UpdateNodegroupConfig API operation. +type UpdateNodegroupConfigResponse struct { + *UpdateNodegroupConfigOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateNodegroupConfig request. +func (r *UpdateNodegroupConfigResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_op_UpdateNodegroupVersion.go b/service/eks/api_op_UpdateNodegroupVersion.go new file mode 100644 index 00000000000..0e0bf952d7b --- /dev/null +++ b/service/eks/api_op_UpdateNodegroupVersion.go @@ -0,0 +1,229 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
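A hedged sketch of using the UpdateNodegroupConfig operation defined above to change labels and scaling configuration; the label keys, sizes, and resource names are placeholders, and the client is assumed to be constructed as in the earlier sketch:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

// updateNodegroupConfig applies a label change and new scaling limits to a
// managed node group and prints the asynchronous update that is returned.
func updateNodegroupConfig(ctx context.Context, client *eks.Client) error {
	req := client.UpdateNodegroupConfigRequest(&eks.UpdateNodegroupConfigInput{
		ClusterName:   aws.String("my-cluster"),
		NodegroupName: aws.String("my-nodegroup"),
		Labels: &eks.UpdateLabelsPayload{
			AddOrUpdateLabels: map[string]string{"environment": "staging"},
			RemoveLabels:      []string{"deprecated-label"},
		},
		ScalingConfig: &eks.NodegroupScalingConfig{
			MinSize:     aws.Int64(1),
			MaxSize:     aws.Int64(4),
			DesiredSize: aws.Int64(2),
		},
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	// The returned update can be tracked with DescribeUpdate, which now
	// accepts the nodegroupName query parameter shown earlier in this diff.
	fmt.Println(resp.Update)
	return nil
}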
+ +package eks + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateNodegroupVersionInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` + + // The name of the Amazon EKS cluster that is associated with the managed node + // group to update. + // + // ClusterName is a required field + ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` + + // Force the update if the existing node group's pods are unable to be drained + // due to a pod disruption budget issue. If a previous update fails because + // pods could not be drained, you can force the update after it fails to terminate + // the old node regardless of whether or not any pods are running on the node. + Force *bool `locationName:"force" type:"boolean"` + + // The name of the managed node group to update. + // + // NodegroupName is a required field + NodegroupName *string `location:"uri" locationName:"nodegroupName" type:"string" required:"true"` + + // The AMI version of the Amazon EKS-optimized AMI to use for the update. By + // default, the latest available AMI version for the node group's Kubernetes + // version is used. For more information, see Amazon EKS-Optimized Linux AMI + // Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) + // in the Amazon EKS User Guide. + ReleaseVersion *string `locationName:"releaseVersion" type:"string"` + + // The Kubernetes version to update to. If no version is specified, then the + // Kubernetes version of the node group does not change. You can specify the + // Kubernetes version of the cluster to update the node group to the latest + // AMI version of the cluster's Kubernetes version. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s UpdateNodegroupVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateNodegroupVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateNodegroupVersionInput"} + + if s.ClusterName == nil { + invalidParams.Add(aws.NewErrParamRequired("ClusterName")) + } + + if s.NodegroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("NodegroupName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateNodegroupVersionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientRequestToken string + if s.ClientRequestToken != nil { + ClientRequestToken = *s.ClientRequestToken + } else { + ClientRequestToken = protocol.GetIdempotencyToken() + } + { + v := ClientRequestToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clientRequestToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Force != nil { + v := *s.Force + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "force", protocol.BoolValue(v), metadata) + } + if s.ReleaseVersion != nil { + v := *s.ReleaseVersion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "releaseVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Version != nil { + v := *s.Version + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "version", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateNodegroupVersionOutput struct { + _ struct{} `type:"structure"` + + // An object representing an asynchronous update. + Update *Update `locationName:"update" type:"structure"` +} + +// String returns the string representation +func (s UpdateNodegroupVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateNodegroupVersionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Update != nil { + v := s.Update + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "update", v, metadata) + } + return nil +} + +const opUpdateNodegroupVersion = "UpdateNodegroupVersion" + +// UpdateNodegroupVersionRequest returns a request value for making API operation for +// Amazon Elastic Kubernetes Service. +// +// Updates the Kubernetes version or AMI version of an Amazon EKS managed node +// group. +// +// You can update to the latest available AMI version of a node group's current +// Kubernetes version by not specifying a Kubernetes version in the request. +// You can update to the latest AMI version of your cluster's current Kubernetes +// version by specifying your cluster's Kubernetes version in the request. For +// more information, see Amazon EKS-Optimized Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) +// in the Amazon EKS User Guide. +// +// You cannot roll back a node group to an earlier Kubernetes version or AMI +// version. +// +// When a node in a managed node group is terminated due to a scaling action +// or update, the pods in that node are drained first. Amazon EKS attempts to +// drain the nodes gracefully and will fail if it is unable to do so. You can +// force the update if Amazon EKS is unable to drain the nodes as a result of +// a pod disruption budget issue. 
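A sketch of requesting a node group version update with UpdateNodegroupVersion, following the behavior described in the doc comment above; the Kubernetes version and resource names are placeholders, and the client is assumed as before:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/eks"
)

// updateNodegroupVersion moves a managed node group to the latest AMI for the
// given Kubernetes version and returns once the update has been accepted.
func updateNodegroupVersion(ctx context.Context, client *eks.Client) error {
	req := client.UpdateNodegroupVersionRequest(&eks.UpdateNodegroupVersionInput{
		ClusterName:   aws.String("my-cluster"),
		NodegroupName: aws.String("my-nodegroup"),
		// Specify the cluster's Kubernetes version to move the node group to
		// the latest AMI for that version; omit Version to keep the current
		// Kubernetes version and only pick up the latest AMI release.
		Version: aws.String("1.14"), // placeholder version
		// Force terminates old nodes even if pods cannot be drained because
		// of a pod disruption budget.
		Force: aws.Bool(true),
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(resp.Update)
	return nil
}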
+// +// // Example sending a request using UpdateNodegroupVersionRequest. +// req := client.UpdateNodegroupVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/eks-2017-11-01/UpdateNodegroupVersion +func (c *Client) UpdateNodegroupVersionRequest(input *UpdateNodegroupVersionInput) UpdateNodegroupVersionRequest { + op := &aws.Operation{ + Name: opUpdateNodegroupVersion, + HTTPMethod: "POST", + HTTPPath: "/clusters/{name}/node-groups/{nodegroupName}/update-version", + } + + if input == nil { + input = &UpdateNodegroupVersionInput{} + } + + req := c.newRequest(op, input, &UpdateNodegroupVersionOutput{}) + return UpdateNodegroupVersionRequest{Request: req, Input: input, Copy: c.UpdateNodegroupVersionRequest} +} + +// UpdateNodegroupVersionRequest is the request type for the +// UpdateNodegroupVersion API operation. +type UpdateNodegroupVersionRequest struct { + *aws.Request + Input *UpdateNodegroupVersionInput + Copy func(*UpdateNodegroupVersionInput) UpdateNodegroupVersionRequest +} + +// Send marshals and sends the UpdateNodegroupVersion API request. +func (r UpdateNodegroupVersionRequest) Send(ctx context.Context) (*UpdateNodegroupVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateNodegroupVersionResponse{ + UpdateNodegroupVersionOutput: r.Request.Data.(*UpdateNodegroupVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateNodegroupVersionResponse is the response type for the +// UpdateNodegroupVersion API operation. +type UpdateNodegroupVersionResponse struct { + *UpdateNodegroupVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateNodegroupVersion request. +func (r *UpdateNodegroupVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/eks/api_types.go b/service/eks/api_types.go index 7355a1db8da..0570e8f3f26 100644 --- a/service/eks/api_types.go +++ b/service/eks/api_types.go @@ -13,6 +13,31 @@ import ( var _ aws.Config var _ = awsutil.Prettify +// An AutoScaling group that is associated with an Amazon EKS managed node group. +type AutoScalingGroup struct { + _ struct{} `type:"structure"` + + // The name of the AutoScaling group associated with an Amazon EKS managed node + // group. + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s AutoScalingGroup) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AutoScalingGroup) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // An object representing the certificate-authority-data for your cluster. type Certificate struct { _ struct{} `type:"structure"` @@ -90,7 +115,8 @@ type Cluster struct { // The metadata that you apply to the cluster to assist with categorization // and organization. Each tag consists of a key and an optional value, both - // of which you define. + // of which you define. Cluster tags do not propagate to any other resources + // associated with the cluster. 
Tags map[string]string `locationName:"tags" min:"1" type:"map"` // The Kubernetes server version for the cluster. @@ -290,6 +316,99 @@ func (s Identity) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An object representing an issue with an Amazon EKS resource. +type Issue struct { + _ struct{} `type:"structure"` + + // A brief description of the error. + // + // * AutoScalingGroupNotFound: We couldn't find the Auto Scaling group associated + // with the managed node group. You may be able to recreate an Auto Scaling + // group with the same settings to recover. + // + // * Ec2SecurityGroupNotFound: We couldn't find the cluster security group + // for the cluster. You must recreate your cluster. + // + // * Ec2SecurityGroupDeletionFailure: We could not delete the remote access + // security group for your managed node group. Remove any dependencies from + // the security group. + // + // * Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template + // for your managed node group. You may be able to recreate a launch template + // with the same settings to recover. + // + // * Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version + // for your managed node group does not match the version that Amazon EKS + // created. You may be able to revert to the Amazon EKS-created version to + // recover. + // + // * IamInstanceProfileNotFound: We couldn't find the IAM instance profile + // for your managed node group. You may be able to recreate an instance profile + // with the same settings to recover. + // + // * IamNodeRoleNotFound: We couldn't find the IAM role for your managed + // node group. You may be able to recreate an IAM role with the same settings + // to recover. + // + // * AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures + // while attempting to launch instances. + // + // * InstanceLimitExceeded: Your AWS account is unable to launch any more + // instances of the specified instance type. You may be able to request an + // Amazon EC2 instance limit increase to recover. + // + // * InsufficientFreeAddresses: One or more of the subnets associated with + // your managed node group does not have enough available IP addresses for + // new nodes. + // + // * AccessDenied: Amazon EKS and or one or more of your managed nodes is + // unable to communicate with your cluster API server. + // + // * InternalFailure: These errors are usually caused by an Amazon EKS server-side + // issue. + Code NodegroupIssueCode `locationName:"code" type:"string" enum:"true"` + + // The error message associated with the issue. + Message *string `locationName:"message" type:"string"` + + // The AWS resources that are afflicted by this issue. + ResourceIds []string `locationName:"resourceIds" type:"list"` +} + +// String returns the string representation +func (s Issue) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Issue) MarshalFields(e protocol.FieldEncoder) error { + if len(s.Code) > 0 { + v := s.Code + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "code", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ResourceIds != nil { + v := s.ResourceIds + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "resourceIds", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // An object representing the enabled or disabled Kubernetes control plane logs // for your cluster. type LogSetup struct { @@ -363,6 +482,377 @@ func (s Logging) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An object representing an Amazon EKS managed node group. +type Nodegroup struct { + _ struct{} `type:"structure"` + + // The AMI type associated with your node group. GPU instance types should use + // the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI + // with GPU support; non-GPU instances should use the AL2_x86_64 AMI type, which + // uses the Amazon EKS-optimized Linux AMI. + AmiType AMITypes `locationName:"amiType" type:"string" enum:"true"` + + // The name of the cluster that the managed node group resides in. + ClusterName *string `locationName:"clusterName" type:"string"` + + // The Unix epoch timestamp in seconds for when the managed node group was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The root device disk size (in GiB) for your node group instances. The default + // disk size is 20 GiB. + DiskSize *int64 `locationName:"diskSize" type:"integer"` + + // The health status of the node group. If there are issues with your node group's + // health, they are listed here. + Health *NodegroupHealth `locationName:"health" type:"structure"` + + // The instance types associated with your node group. + InstanceTypes []string `locationName:"instanceTypes" type:"list"` + + // The Kubernetes labels applied to the nodes in the node group. + // + // Only labels that are applied with the Amazon EKS API are shown here. There + // may be other Kubernetes labels applied to the nodes in this group. + Labels map[string]string `locationName:"labels" type:"map"` + + // The Unix epoch timestamp in seconds for when the managed node group was last + // modified. + ModifiedAt *time.Time `locationName:"modifiedAt" type:"timestamp"` + + // The IAM role associated with your node group. The Amazon EKS worker node + // kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive + // permissions for these API calls through an IAM instance profile and associated + // policies. Before you can launch worker nodes and register them into a cluster, + // you must create an IAM role for those worker nodes to use when they are launched. + // For more information, see Amazon EKS Worker Node IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) + // in the Amazon EKS User Guide . + NodeRole *string `locationName:"nodeRole" type:"string"` + + // The Amazon Resource Name (ARN) associated with the managed node group. + NodegroupArn *string `locationName:"nodegroupArn" type:"string"` + + // The name associated with an Amazon EKS managed node group. 
+ NodegroupName *string `locationName:"nodegroupName" type:"string"` + + // The AMI version of the managed node group. For more information, see Amazon + // EKS-Optimized Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) + // in the Amazon EKS User Guide. + ReleaseVersion *string `locationName:"releaseVersion" type:"string"` + + // The remote access (SSH) configuration that is associated with the node group. + RemoteAccess *RemoteAccessConfig `locationName:"remoteAccess" type:"structure"` + + // The resources associated with the nodegroup, such as AutoScaling groups and + // security groups for remote access. + Resources *NodegroupResources `locationName:"resources" type:"structure"` + + // The scaling configuration details for the AutoScaling group that is associated + // with your node group. + ScalingConfig *NodegroupScalingConfig `locationName:"scalingConfig" type:"structure"` + + // The current status of the managed node group. + Status NodegroupStatus `locationName:"status" type:"string" enum:"true"` + + // The subnets allowed for the AutoScaling group that is associated with your + // node group. These subnets must have the following tag: kubernetes.io/cluster/CLUSTER_NAME, + // where CLUSTER_NAME is replaced with the name of your cluster. + Subnets []string `locationName:"subnets" type:"list"` + + // The metadata applied the node group to assist with categorization and organization. + // Each tag consists of a key and an optional value, both of which you define. + // Node group tags do not propagate to any other resources associated with the + // node group, such as the Amazon EC2 instances or subnets. + Tags map[string]string `locationName:"tags" min:"1" type:"map"` + + // The Kubernetes version of the managed node group. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s Nodegroup) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Nodegroup) MarshalFields(e protocol.FieldEncoder) error { + if len(s.AmiType) > 0 { + v := s.AmiType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "amiType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ClusterName != nil { + v := *s.ClusterName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clusterName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedAt != nil { + v := *s.CreatedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "createdAt", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DiskSize != nil { + v := *s.DiskSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "diskSize", protocol.Int64Value(v), metadata) + } + if s.Health != nil { + v := s.Health + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "health", v, metadata) + } + if s.InstanceTypes != nil { + v := s.InstanceTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "instanceTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Labels != nil { + v := s.Labels + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "labels", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.ModifiedAt != nil { + v := *s.ModifiedAt + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "modifiedAt", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.NodeRole != nil { + v := *s.NodeRole + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nodeRole", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupArn != nil { + v := *s.NodegroupArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nodegroupArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NodegroupName != nil { + v := *s.NodegroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nodegroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReleaseVersion != nil { + v := *s.ReleaseVersion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "releaseVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RemoteAccess != nil { + v := s.RemoteAccess + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "remoteAccess", v, metadata) + } + if s.Resources != nil { + v := s.Resources + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "resources", v, metadata) + } + if s.ScalingConfig != nil { + v := s.ScalingConfig + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "scalingConfig", v, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Subnets != nil { + v := s.Subnets + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "subnets", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } 
+ ls0.End() + + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.Version != nil { + v := *s.Version + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "version", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object representing the health status of the node group. +type NodegroupHealth struct { + _ struct{} `type:"structure"` + + // Any issues that are associated with the node group. + Issues []Issue `locationName:"issues" type:"list"` +} + +// String returns the string representation +func (s NodegroupHealth) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s NodegroupHealth) MarshalFields(e protocol.FieldEncoder) error { + if s.Issues != nil { + v := s.Issues + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "issues", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// An object representing the resources associated with the nodegroup, such +// as AutoScaling groups and security groups for remote access. +type NodegroupResources struct { + _ struct{} `type:"structure"` + + // The autoscaling groups associated with the node group. + AutoScalingGroups []AutoScalingGroup `locationName:"autoScalingGroups" type:"list"` + + // The remote access security group associated with the node group. This security + // group controls SSH access to the worker nodes. + RemoteAccessSecurityGroup *string `locationName:"remoteAccessSecurityGroup" type:"string"` +} + +// String returns the string representation +func (s NodegroupResources) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s NodegroupResources) MarshalFields(e protocol.FieldEncoder) error { + if s.AutoScalingGroups != nil { + v := s.AutoScalingGroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "autoScalingGroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RemoteAccessSecurityGroup != nil { + v := *s.RemoteAccessSecurityGroup + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "remoteAccessSecurityGroup", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object representing the scaling configuration details for the AutoScaling +// group that is associated with your node group. +type NodegroupScalingConfig struct { + _ struct{} `type:"structure"` + + // The current number of worker nodes that the managed node group should maintain. + DesiredSize *int64 `locationName:"desiredSize" min:"1" type:"integer"` + + // The maximum number of worker nodes that the managed node group can scale + // out to. Managed node groups can support up to 100 nodes by default. + MaxSize *int64 `locationName:"maxSize" min:"1" type:"integer"` + + // The minimum number of worker nodes that the managed node group can scale + // in to. This number must be greater than zero. 
+ MinSize *int64 `locationName:"minSize" min:"1" type:"integer"` +} + +// String returns the string representation +func (s NodegroupScalingConfig) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NodegroupScalingConfig) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "NodegroupScalingConfig"} + if s.DesiredSize != nil && *s.DesiredSize < 1 { + invalidParams.Add(aws.NewErrParamMinValue("DesiredSize", 1)) + } + if s.MaxSize != nil && *s.MaxSize < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxSize", 1)) + } + if s.MinSize != nil && *s.MinSize < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MinSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s NodegroupScalingConfig) MarshalFields(e protocol.FieldEncoder) error { + if s.DesiredSize != nil { + v := *s.DesiredSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "desiredSize", protocol.Int64Value(v), metadata) + } + if s.MaxSize != nil { + v := *s.MaxSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maxSize", protocol.Int64Value(v), metadata) + } + if s.MinSize != nil { + v := *s.MinSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "minSize", protocol.Int64Value(v), metadata) + } + return nil +} + // An object representing the OpenID Connect (https://openid.net/connect/) identity // provider information for the cluster. type OIDC struct { @@ -388,6 +878,54 @@ func (s OIDC) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An object representing the remote access configuration for the managed node +// group. +type RemoteAccessConfig struct { + _ struct{} `type:"structure"` + + // The Amazon EC2 SSH key that provides access for SSH communication with the + // worker nodes in the managed node group. For more information, see Amazon + // EC2 Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) + // in the Amazon Elastic Compute Cloud User Guide for Linux Instances. + Ec2SshKey *string `locationName:"ec2SshKey" type:"string"` + + // The security groups to allow SSH access (port 22) from on the worker nodes. + // If you specify an Amazon EC2 SSH key, but you do not specify a source security + // group when you create a managed node group, port 22 on the worker nodes is + // opened to the internet (0.0.0.0/0). For more information, see Security Groups + // for Your VPC (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) + // in the Amazon Virtual Private Cloud User Guide. + SourceSecurityGroups []string `locationName:"sourceSecurityGroups" type:"list"` +} + +// String returns the string representation +func (s RemoteAccessConfig) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s RemoteAccessConfig) MarshalFields(e protocol.FieldEncoder) error { + if s.Ec2SshKey != nil { + v := *s.Ec2SshKey + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ec2SshKey", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceSecurityGroups != nil { + v := s.SourceSecurityGroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "sourceSecurityGroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // An object representing an asynchronous update. type Update struct { _ struct{} `type:"structure"` @@ -470,6 +1008,51 @@ func (s Update) MarshalFields(e protocol.FieldEncoder) error { return nil } +// An object representing a Kubernetes label change for a managed node group. +type UpdateLabelsPayload struct { + _ struct{} `type:"structure"` + + // Kubernetes labels to be added or updated. + AddOrUpdateLabels map[string]string `locationName:"addOrUpdateLabels" type:"map"` + + // Kubernetes labels to be removed. + RemoveLabels []string `locationName:"removeLabels" type:"list"` +} + +// String returns the string representation +func (s UpdateLabelsPayload) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateLabelsPayload) MarshalFields(e protocol.FieldEncoder) error { + if s.AddOrUpdateLabels != nil { + v := s.AddOrUpdateLabels + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "addOrUpdateLabels", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.RemoveLabels != nil { + v := s.RemoveLabels + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "removeLabels", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // An object representing the details of an update request. type UpdateParam struct { _ struct{} `type:"structure"` @@ -587,6 +1170,11 @@ func (s VpcConfigRequest) MarshalFields(e protocol.FieldEncoder) error { type VpcConfigResponse struct { _ struct{} `type:"structure"` + // The cluster security group that was created by Amazon EKS for the cluster. + // Managed node groups use this security group for control plane to data plane + // communication. + ClusterSecurityGroupId *string `locationName:"clusterSecurityGroupId" type:"string"` + // This parameter indicates whether the Amazon EKS private API server endpoint // is enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes // API requests that originate from within your cluster's VPC use the private @@ -618,6 +1206,12 @@ func (s VpcConfigResponse) String() string { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
func (s VpcConfigResponse) MarshalFields(e protocol.FieldEncoder) error { + if s.ClusterSecurityGroupId != nil { + v := *s.ClusterSecurityGroupId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clusterSecurityGroupId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.EndpointPrivateAccess != nil { v := *s.EndpointPrivateAccess diff --git a/service/eks/api_waiters.go b/service/eks/api_waiters.go index 4e22f76353a..b792b7910b8 100644 --- a/service/eks/api_waiters.go +++ b/service/eks/api_waiters.go @@ -106,3 +106,91 @@ func (c *Client) WaitUntilClusterDeleted(ctx context.Context, input *DescribeClu return w.Wait(ctx) } + +// WaitUntilNodegroupActive uses the Amazon EKS API operation +// DescribeNodegroup to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilNodegroupActive(ctx context.Context, input *DescribeNodegroupInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilNodegroupActive", + MaxAttempts: 80, + Delay: aws.ConstantWaiterDelay(30 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.FailureWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "nodegroup.status", + Expected: "CREATE_FAILED", + }, + { + State: aws.SuccessWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "nodegroup.status", + Expected: "ACTIVE", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *DescribeNodegroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.DescribeNodegroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} + +// WaitUntilNodegroupDeleted uses the Amazon EKS API operation +// DescribeNodegroup to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilNodegroupDeleted(ctx context.Context, input *DescribeNodegroupInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilNodegroupDeleted", + MaxAttempts: 40, + Delay: aws.ConstantWaiterDelay(30 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.FailureWaiterState, + Matcher: aws.PathWaiterMatch, Argument: "nodegroup.status", + Expected: "DELETE_FAILED", + }, + { + State: aws.SuccessWaiterState, + Matcher: aws.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *DescribeNodegroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.DescribeNodegroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.Wait(ctx) +} diff --git a/service/eks/eksiface/interface.go b/service/eks/eksiface/interface.go index 8dbb1bb9186..eb97378de8d 100644 --- a/service/eks/eksiface/interface.go +++ b/service/eks/eksiface/interface.go @@ -66,14 +66,22 @@ import ( type ClientAPI interface { CreateClusterRequest(*eks.CreateClusterInput) eks.CreateClusterRequest + CreateNodegroupRequest(*eks.CreateNodegroupInput) eks.CreateNodegroupRequest + DeleteClusterRequest(*eks.DeleteClusterInput) eks.DeleteClusterRequest + DeleteNodegroupRequest(*eks.DeleteNodegroupInput) eks.DeleteNodegroupRequest + DescribeClusterRequest(*eks.DescribeClusterInput) eks.DescribeClusterRequest + DescribeNodegroupRequest(*eks.DescribeNodegroupInput) eks.DescribeNodegroupRequest + DescribeUpdateRequest(*eks.DescribeUpdateInput) eks.DescribeUpdateRequest ListClustersRequest(*eks.ListClustersInput) eks.ListClustersRequest + ListNodegroupsRequest(*eks.ListNodegroupsInput) eks.ListNodegroupsRequest + ListTagsForResourceRequest(*eks.ListTagsForResourceInput) eks.ListTagsForResourceRequest ListUpdatesRequest(*eks.ListUpdatesInput) eks.ListUpdatesRequest @@ -86,9 +94,17 @@ type ClientAPI interface { UpdateClusterVersionRequest(*eks.UpdateClusterVersionInput) eks.UpdateClusterVersionRequest + UpdateNodegroupConfigRequest(*eks.UpdateNodegroupConfigInput) eks.UpdateNodegroupConfigRequest + + UpdateNodegroupVersionRequest(*eks.UpdateNodegroupVersionInput) eks.UpdateNodegroupVersionRequest + WaitUntilClusterActive(context.Context, *eks.DescribeClusterInput, ...aws.WaiterOption) error WaitUntilClusterDeleted(context.Context, *eks.DescribeClusterInput, ...aws.WaiterOption) error + + WaitUntilNodegroupActive(context.Context, *eks.DescribeNodegroupInput, ...aws.WaiterOption) error + + WaitUntilNodegroupDeleted(context.Context, *eks.DescribeNodegroupInput, ...aws.WaiterOption) error } var _ ClientAPI = (*eks.Client)(nil) diff --git a/service/elasticloadbalancingv2/api_errors.go b/service/elasticloadbalancingv2/api_errors.go index 24712753b7c..94d3d481c08 100644 --- a/service/elasticloadbalancingv2/api_errors.go +++ b/service/elasticloadbalancingv2/api_errors.go @@ -211,6 +211,14 @@ const ( // You've reached the limit on the number of targets. ErrCodeTooManyTargetsException = "TooManyTargets" + // ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException for service response error code + // "TooManyUniqueTargetGroupsPerLoadBalancer". + // + // You've reached the limit on the number of unique target groups per load balancer + // across all listeners. If a target group is used by multiple actions for a + // load balancer, it is counted as only one use. + ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException = "TooManyUniqueTargetGroupsPerLoadBalancer" + // ErrCodeUnsupportedProtocolException for service response error code // "UnsupportedProtocol". 
// diff --git a/service/elasticloadbalancingv2/api_examples_test.go b/service/elasticloadbalancingv2/api_examples_test.go index d6a6d85d6b3..bbc83567472 100644 --- a/service/elasticloadbalancingv2/api_examples_test.go +++ b/service/elasticloadbalancingv2/api_examples_test.go @@ -137,6 +137,8 @@ func ExampleClient_CreateListenerRequest_shared00() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -219,6 +221,8 @@ func ExampleClient_CreateListenerRequest_shared01() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -423,6 +427,8 @@ func ExampleClient_CreateRuleRequest_shared00() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1117,6 +1123,8 @@ func ExampleClient_ModifyListenerRequest_shared00() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1185,6 +1193,8 @@ func ExampleClient_ModifyListenerRequest_shared01() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1385,6 +1395,8 @@ func ExampleClient_ModifyRuleRequest_shared00() { fmt.Println(elasticloadbalancingv2.ErrCodeTooManyActionsException, aerr.Error()) case elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException: fmt.Println(elasticloadbalancingv2.ErrCodeInvalidLoadBalancerActionException, aerr.Error()) + case elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException: + fmt.Println(elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException, aerr.Error()) 
default: fmt.Println(aerr.Error()) } diff --git a/service/elasticloadbalancingv2/api_op_CreateListener.go b/service/elasticloadbalancingv2/api_op_CreateListener.go index 943c232e504..e87d47601c4 100644 --- a/service/elasticloadbalancingv2/api_op_CreateListener.go +++ b/service/elasticloadbalancingv2/api_op_CreateListener.go @@ -23,10 +23,10 @@ type CreateListenerInput struct { // The actions for the default rule. The rule must include one forward action // or one or more fixed-response actions. // - // If the action type is forward, you specify a target group. The protocol of - // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a - // Network Load Balancer. + // If the action type is forward, you specify one or more target groups. The + // protocol of the target group must be HTTP or HTTPS for an Application Load + // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP + // for a Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. diff --git a/service/elasticloadbalancingv2/api_op_CreateRule.go b/service/elasticloadbalancingv2/api_op_CreateRule.go index be04942d861..3e6d981036f 100644 --- a/service/elasticloadbalancingv2/api_op_CreateRule.go +++ b/service/elasticloadbalancingv2/api_op_CreateRule.go @@ -17,10 +17,10 @@ type CreateRuleInput struct { // actions: forward, fixed-response, or redirect, and it must be the last action // to be performed. // - // If the action type is forward, you specify a target group. The protocol of - // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a - // Network Load Balancer. + // If the action type is forward, you specify one or more target groups. The + // protocol of the target group must be HTTP or HTTPS for an Application Load + // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP + // for a Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. diff --git a/service/elasticloadbalancingv2/api_op_ModifyListener.go b/service/elasticloadbalancingv2/api_op_ModifyListener.go index 24664e30df3..28c482cfd54 100644 --- a/service/elasticloadbalancingv2/api_op_ModifyListener.go +++ b/service/elasticloadbalancingv2/api_op_ModifyListener.go @@ -23,10 +23,10 @@ type ModifyListenerInput struct { // The actions for the default rule. The rule must include one forward action // or one or more fixed-response actions. // - // If the action type is forward, you specify a target group. The protocol of - // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a - // Network Load Balancer. + // If the action type is forward, you specify one or more target groups. The + // protocol of the target group must be HTTP or HTTPS for an Application Load + // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP + // for a Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. 
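The updated documentation above lets a forward action reference more than one target group; the supporting types (ForwardActionConfig, TargetGroupTuple, TargetGroupStickinessConfig) appear further down in this diff. A minimal sketch of a weighted, sticky forward action on an existing listener; the ARNs are placeholders, and the error check shows the new TooManyUniqueTargetGroupsPerLoadBalancer code.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config, %v", err)
	}
	client := elasticloadbalancingv2.New(cfg)

	// Replace the listener's default action with a weighted forward across
	// two target groups (90/10 split) and hour-long target group stickiness.
	req := client.ModifyListenerRequest(&elasticloadbalancingv2.ModifyListenerInput{
		ListenerArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:listener/app/my-lb/abc/def"), // placeholder
		DefaultActions: []elasticloadbalancingv2.Action{
			{
				Type: elasticloadbalancingv2.ActionTypeEnumForward,
				ForwardConfig: &elasticloadbalancingv2.ForwardActionConfig{
					TargetGroups: []elasticloadbalancingv2.TargetGroupTuple{
						{TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/blue/111"), Weight: aws.Int64(90)},  // placeholder
						{TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/green/222"), Weight: aws.Int64(10)}, // placeholder
					},
					TargetGroupStickinessConfig: &elasticloadbalancingv2.TargetGroupStickinessConfig{
						Enabled:         aws.Bool(true),
						DurationSeconds: aws.Int64(3600),
					},
				},
			},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		if aerr, ok := err.(awserr.Error); ok &&
			aerr.Code() == elasticloadbalancingv2.ErrCodeTooManyUniqueTargetGroupsPerLoadBalancerException {
			log.Fatalf("too many unique target groups on this load balancer: %v", aerr)
		}
		log.Fatalf("failed to modify listener, %v", err)
	}
}
```

Because ModifyListener and ModifyRule now replace list properties wholesale, the DefaultActions slice must contain the entire desired action list, not just the additions.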
@@ -106,14 +106,18 @@ const opModifyListener = "ModifyListener" // ModifyListenerRequest returns a request value for making API operation for // Elastic Load Balancing. // -// Modifies the specified properties of the specified listener. +// Replaces the specified properties of the specified listener. Any properties +// that you do not specify remain unchanged. // -// Any properties that you do not specify retain their current values. However, -// changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the +// Changing the protocol from HTTPS to HTTP, or from TLS to TCP, removes the // security policy and default certificate properties. If you change the protocol // from HTTP to HTTPS, or from TCP to TLS, you must add the security policy // and default certificate properties. // +// To add an item to a list, remove an item from a list, or update an item in +// a list, you must provide the entire list. For example, to add an action, +// specify a list with the current actions plus the new action. +// // // Example sending a request using ModifyListenerRequest. // req := client.ModifyListenerRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/elasticloadbalancingv2/api_op_ModifyRule.go b/service/elasticloadbalancingv2/api_op_ModifyRule.go index 4c6e54260d7..d61a9358051 100644 --- a/service/elasticloadbalancingv2/api_op_ModifyRule.go +++ b/service/elasticloadbalancingv2/api_op_ModifyRule.go @@ -17,10 +17,10 @@ type ModifyRuleInput struct { // actions: forward, fixed-response, or redirect, and it must be the last action // to be performed. // - // If the action type is forward, you specify a target group. The protocol of - // the target group must be HTTP or HTTPS for an Application Load Balancer. - // The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP for a - // Network Load Balancer. + // If the action type is forward, you specify one or more target groups. The + // protocol of the target group must be HTTP or HTTPS for an Application Load + // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP + // for a Network Load Balancer. // // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate // users through an identity provider that is OpenID Connect (OIDC) compliant. @@ -89,9 +89,12 @@ const opModifyRule = "ModifyRule" // ModifyRuleRequest returns a request value for making API operation for // Elastic Load Balancing. // -// Modifies the specified rule. +// Replaces the specified properties of the specified rule. Any properties that +// you do not specify are unchanged. // -// Any existing properties that you do not modify retain their current values. +// To add an item to a list, remove an item from a list, or update an item in +// a list, you must provide the entire list. For example, to add an action, +// specify a list with the current actions plus the new action. // // To modify the actions for the default rule, use ModifyListener. // diff --git a/service/elasticloadbalancingv2/api_types.go b/service/elasticloadbalancingv2/api_types.go index f9e414d257d..cfd36eec6ba 100644 --- a/service/elasticloadbalancingv2/api_types.go +++ b/service/elasticloadbalancingv2/api_types.go @@ -28,6 +28,13 @@ type Action struct { // a custom HTTP response. Specify only when Type is fixed-response. FixedResponseConfig *FixedResponseActionConfig `type:"structure"` + // Information for creating an action that distributes requests among one or + // more target groups. 
For Network Load Balancers, you can specify a single + // target group. Specify only when Type is forward. If you specify both ForwardConfig + // and TargetGroupArn, you can specify only one target group using ForwardConfig + // and it must be the same target group specified in TargetGroupArn. + ForwardConfig *ForwardActionConfig `type:"structure"` + // The order for the action. This value is required for rules with multiple // actions. The action with the lowest value for order is performed first. The // last action to be performed must be one of the following types of actions: @@ -39,7 +46,8 @@ type Action struct { RedirectConfig *RedirectActionConfig `type:"structure"` // The Amazon Resource Name (ARN) of the target group. Specify only when Type - // is forward. + // is forward and you want to route to a single target group. To route to one + // or more target groups, use ForwardConfig instead. TargetGroupArn *string `type:"string"` // The type of action. @@ -369,6 +377,23 @@ func (s *FixedResponseActionConfig) Validate() error { return nil } +// Information about a forward action. +type ForwardActionConfig struct { + _ struct{} `type:"structure"` + + // The target group stickiness for the rule. + TargetGroupStickinessConfig *TargetGroupStickinessConfig `type:"structure"` + + // One or more target groups. For Network Load Balancers, you can specify a + // single target group. + TargetGroups []TargetGroupTuple `type:"list"` +} + +// String returns the string representation +func (s ForwardActionConfig) String() string { + return awsutil.Prettify(s) +} + // Information about a host header condition. type HostHeaderConditionConfig struct { _ struct{} `type:"structure"` @@ -467,6 +492,12 @@ type Limit struct { // // * target-groups // + // * target-groups-per-action-on-application-load-balancer + // + // * target-groups-per-action-on-network-load-balancer + // + // * target-groups-per-application-load-balancer + // // * targets-per-application-load-balancer // // * targets-per-availability-zone-per-network-load-balancer @@ -614,7 +645,7 @@ type LoadBalancerAttribute struct { // // * routing.http.drop_invalid_header_fields.enabled - Indicates whether // HTTP headers with invalid header fields are removed by the load balancer - // (true) or routed to targets (false). The default is true. + // (true) or routed to targets (false). The default is false. // // * routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value // is true or false. The default is true. @@ -1250,6 +1281,40 @@ func (s TargetGroupAttribute) String() string { return awsutil.Prettify(s) } +// Information about the target group stickiness for a rule. +type TargetGroupStickinessConfig struct { + _ struct{} `type:"structure"` + + // The time period, in seconds, during which requests from a client should be + // routed to the same target group. The range is 1-604800 seconds (7 days). + DurationSeconds *int64 `type:"integer"` + + // Indicates whether target group stickiness is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s TargetGroupStickinessConfig) String() string { + return awsutil.Prettify(s) +} + +// Information about how traffic will be distributed between multiple target +// groups in a forward rule. +type TargetGroupTuple struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + TargetGroupArn *string `type:"string"` + + // The weight. The range is 0 to 999. 
+ Weight *int64 `type:"integer"` +} + +// String returns the string representation +func (s TargetGroupTuple) String() string { + return awsutil.Prettify(s) +} + // Information about the current health of a target. type TargetHealth struct { _ struct{} `type:"structure"` diff --git a/service/emr/api_op_AddInstanceFleet.go b/service/emr/api_op_AddInstanceFleet.go index 532e838a866..840738f37d7 100644 --- a/service/emr/api_op_AddInstanceFleet.go +++ b/service/emr/api_op_AddInstanceFleet.go @@ -54,6 +54,9 @@ func (s *AddInstanceFleetInput) Validate() error { type AddInstanceFleetOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // The unique identifier of the cluster. ClusterId *string `type:"string"` diff --git a/service/emr/api_op_AddInstanceGroups.go b/service/emr/api_op_AddInstanceGroups.go index bc35d072558..2791555ce11 100644 --- a/service/emr/api_op_AddInstanceGroups.go +++ b/service/emr/api_op_AddInstanceGroups.go @@ -59,6 +59,9 @@ func (s *AddInstanceGroupsInput) Validate() error { type AddInstanceGroupsOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // Instance group IDs of the newly created instance groups. InstanceGroupIds []string `type:"list"` diff --git a/service/emr/api_op_ListSteps.go b/service/emr/api_op_ListSteps.go index f61d7a0bd07..937073e1c7f 100644 --- a/service/emr/api_op_ListSteps.go +++ b/service/emr/api_op_ListSteps.go @@ -21,7 +21,9 @@ type ListStepsInput struct { // The pagination token that indicates the next set of results to retrieve. Marker *string `type:"string"` - // The filter to limit the step list based on the identifier of the steps. + // The filter to limit the step list based on the identifier of the steps. You + // can specify a maximum of ten Step IDs. The character constraint applies to + // the overall length of the array. StepIds []string `type:"list"` // The filter to limit the step list based on certain states. @@ -70,7 +72,8 @@ const opListSteps = "ListSteps" // Amazon Elastic MapReduce. // // Provides a list of steps for the cluster in reverse order unless you specify -// stepIds with the request. +// stepIds with the request or filter by StepStates. You can specify a maximum +// of ten Step IDs. // // // Example sending a request using ListStepsRequest. // req := client.ListStepsRequest(params) diff --git a/service/emr/api_op_PutAutoScalingPolicy.go b/service/emr/api_op_PutAutoScalingPolicy.go index ba5a9f024c6..316b0fcd386 100644 --- a/service/emr/api_op_PutAutoScalingPolicy.go +++ b/service/emr/api_op_PutAutoScalingPolicy.go @@ -68,6 +68,9 @@ type PutAutoScalingPolicyOutput struct { // The automatic scaling policy definition. AutoScalingPolicy *AutoScalingPolicyDescription `type:"structure"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // Specifies the ID of a cluster. The instance group to which the automatic // scaling policy is applied is within this cluster. ClusterId *string `type:"string"` diff --git a/service/emr/api_op_RunJobFlow.go b/service/emr/api_op_RunJobFlow.go index c7ede593849..c6ad53198e8 100644 --- a/service/emr/api_op_RunJobFlow.go +++ b/service/emr/api_op_RunJobFlow.go @@ -169,13 +169,10 @@ type RunJobFlowInput struct { // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags []Tag `type:"list"` - // This member will be deprecated.
- // - // Whether the cluster is visible to all IAM users of the AWS account associated - // with the cluster. If this value is set to true, all IAM users of that AWS - // account can view and (if they have the proper policy permissions set) manage - // the cluster. If it is set to false, only the IAM user that created the cluster - // can view and manage it. + // A value of true indicates that all IAM users in the AWS account can perform + // cluster actions if they have the proper IAM policy permissions. This is the + // default. A value of false indicates that only the IAM user who created the + // cluster can perform actions. VisibleToAllUsers *bool `type:"boolean"` } @@ -230,6 +227,9 @@ func (s *RunJobFlowInput) Validate() error { type RunJobFlowOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // An unique identifier for the job flow. JobFlowId *string `type:"string"` } diff --git a/service/emr/api_op_SetVisibleToAllUsers.go b/service/emr/api_op_SetVisibleToAllUsers.go index 3f6154b1684..0843236d027 100644 --- a/service/emr/api_op_SetVisibleToAllUsers.go +++ b/service/emr/api_op_SetVisibleToAllUsers.go @@ -11,24 +11,19 @@ import ( "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" ) -// This member will be deprecated. -// // The input to the SetVisibleToAllUsers action. type SetVisibleToAllUsersInput struct { _ struct{} `type:"structure"` - // Identifiers of the job flows to receive the new visibility setting. + // The unique identifier of the job flow (cluster). // // JobFlowIds is a required field JobFlowIds []string `type:"list" required:"true"` - // This member will be deprecated. - // - // Whether the specified clusters are visible to all IAM users of the AWS account - // associated with the cluster. If this value is set to True, all IAM users - // of that AWS account can view and, if they have the proper IAM policy permissions - // set, manage the clusters. If it is set to False, only the IAM user that created - // a cluster can view and manage it. + // A value of true indicates that all IAM users in the AWS account can perform + // cluster actions if they have the proper IAM policy permissions. This is the + // default. A value of false indicates that only the IAM user who created the + // cluster can perform actions. // // VisibleToAllUsers is a required field VisibleToAllUsers *bool `type:"boolean" required:"true"` @@ -71,14 +66,15 @@ const opSetVisibleToAllUsers = "SetVisibleToAllUsers" // SetVisibleToAllUsersRequest returns a request value for making API operation for // Amazon Elastic MapReduce. // -// This member will be deprecated. -// -// Sets whether all AWS Identity and Access Management (IAM) users under your -// account can access the specified clusters (job flows). This action works -// on running clusters. You can also set the visibility of a cluster when you -// launch it using the VisibleToAllUsers parameter of RunJobFlow. The SetVisibleToAllUsers -// action can be called only by an IAM user who created the cluster or the AWS -// account that owns the cluster. +// Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster +// is visible to all IAM users of the AWS account associated with the cluster. +// Only the IAM user who created the cluster or the AWS account root user can +// call this action. 
The default value, true, indicates that all IAM users in +// the AWS account can perform cluster actions if they have the proper IAM policy +// permissions. If set to false, only the IAM user that created the cluster +// can perform actions. This action works on running clusters. You can override +// the default true setting when you create a cluster by using the VisibleToAllUsers +// parameter with RunJobFlow. // // // Example sending a request using SetVisibleToAllUsersRequest. // req := client.SetVisibleToAllUsersRequest(params) diff --git a/service/emr/api_types.go b/service/emr/api_types.go index 439de4ad60b..e2ad8faa6fa 100644 --- a/service/emr/api_types.go +++ b/service/emr/api_types.go @@ -326,8 +326,8 @@ type CloudWatchAlarmDefinition struct { // A CloudWatch metric dimension. Dimensions []MetricDimension `type:"list"` - // The number of periods, expressed in seconds using Period, during which the - // alarm condition must exist before the alarm triggers automatic scaling activity. + // The number of periods, in five-minute increments, during which the alarm + // condition must exist before the alarm triggers automatic scaling activity. // The default value is 1. EvaluationPeriods *int64 `type:"integer"` @@ -406,6 +406,9 @@ type Cluster struct { // Specifies whether the cluster should terminate after completing all steps. AutoTerminate *bool `type:"boolean"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // Applies only to Amazon EMR releases 4.x and later. The list of Configurations // supplied to the EMR cluster. Configurations []Configuration `type:"list"` @@ -511,14 +514,14 @@ type Cluster struct { // of a cluster error. TerminationProtected *bool `type:"boolean"` - // This member will be deprecated. - // // Indicates whether the cluster is visible to all IAM users of the AWS account - // associated with the cluster. If this value is set to true, all IAM users - // of that AWS account can view and manage the cluster if they have the proper - // policy permissions set. If this value is false, only the IAM user that created - // the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers - // action. + // associated with the cluster. The default value, true, indicates that all + // IAM users in the AWS account can perform cluster actions if they have the + // proper IAM policy permissions. If this value is false, only the IAM user + // that created the cluster can perform actions. This value can be changed on + // a running cluster by using the SetVisibleToAllUsers action. You can override + // the default value of true when you create a cluster by using the VisibleToAllUsers + // parameter of the RunJobFlow action. VisibleToAllUsers *bool `type:"boolean"` } @@ -567,6 +570,9 @@ func (s ClusterStatus) String() string { type ClusterSummary struct { _ struct{} `type:"structure"` + // The Amazon Resource Name of the cluster. + ClusterArn *string `min:"20" type:"string"` + // The unique identifier for the cluster. Id *string `type:"string"` @@ -1319,12 +1325,9 @@ type InstanceGroup struct { // of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicyDescription `type:"structure"` - // The maximum Spot price your are willing to pay for EC2 instances. - // - // An optional, nullable field that applies if the MarketType for the instance - // group is specified as SPOT. Specify the maximum spot price in USD. 
If the - // value is NULL and SPOT is specified, the maximum Spot price is set equal - // to the On-Demand price. + // The bid price for each EC2 Spot instance type as defined by InstanceType. + // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice + // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPrice *string `type:"string"` // @@ -1399,12 +1402,9 @@ type InstanceGroupConfig struct { // of a CloudWatch metric. See PutAutoScalingPolicy. AutoScalingPolicy *AutoScalingPolicy `type:"structure"` - // The maximum Spot price your are willing to pay for EC2 instances. - // - // An optional, nullable field that applies if the MarketType for the instance - // group is specified as SPOT. Specify the maximum spot price in USD. If the - // value is NULL and SPOT is specified, the maximum Spot price is set equal - // to the On-Demand price. + // The bid price for each EC2 Spot instance type as defined by InstanceType. + // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice + // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPrice *string `type:"string"` // @@ -1484,11 +1484,9 @@ func (s *InstanceGroupConfig) Validate() error { type InstanceGroupDetail struct { _ struct{} `type:"structure"` - // The maximum Spot price your are willing to pay for EC2 instances. - // - // An optional, nullable field that applies if the MarketType for the instance - // group is specified as SPOT. Specified in USD. If the value is NULL and SPOT - // is specified, the maximum Spot price is set equal to the On-Demand price. + // The bid price for each EC2 Spot instance type as defined by InstanceType. + // Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice + // is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. BidPrice *string `type:"string"` // The date/time the instance group was created. @@ -1904,14 +1902,14 @@ type JobFlowDetail struct { // is empty. SupportedProducts []string `type:"list"` - // This member will be deprecated. - // - // Specifies whether the cluster is visible to all IAM users of the AWS account - // associated with the cluster. If this value is set to true, all IAM users - // of that AWS account can view and (if they have the proper policy permissions - // set) manage the cluster. If it is set to false, only the IAM user that created - // the cluster can view and manage it. This value can be changed using the SetVisibleToAllUsers - // action. + // Indicates whether the cluster is visible to all IAM users of the AWS account + // associated with the cluster. The default value, true, indicates that all + // IAM users in the AWS account can perform cluster actions if they have the + // proper IAM policy permissions. If this value is false, only the IAM user + // that created the cluster can perform actions. This value can be changed on + // a running cluster by using the SetVisibleToAllUsers action. You can override + // the default value of true when you create a cluster by using the VisibleToAllUsers + // parameter of the RunJobFlow action. 
VisibleToAllUsers *bool `type:"boolean"` } diff --git a/service/firehose/api_enums.go b/service/firehose/api_enums.go index d8614be032e..3547b53631f 100644 --- a/service/firehose/api_enums.go +++ b/service/firehose/api_enums.go @@ -25,10 +25,12 @@ type DeliveryStreamEncryptionStatus string // Enum values for DeliveryStreamEncryptionStatus const ( - DeliveryStreamEncryptionStatusEnabled DeliveryStreamEncryptionStatus = "ENABLED" - DeliveryStreamEncryptionStatusEnabling DeliveryStreamEncryptionStatus = "ENABLING" - DeliveryStreamEncryptionStatusDisabled DeliveryStreamEncryptionStatus = "DISABLED" - DeliveryStreamEncryptionStatusDisabling DeliveryStreamEncryptionStatus = "DISABLING" + DeliveryStreamEncryptionStatusEnabled DeliveryStreamEncryptionStatus = "ENABLED" + DeliveryStreamEncryptionStatusEnabling DeliveryStreamEncryptionStatus = "ENABLING" + DeliveryStreamEncryptionStatusEnablingFailed DeliveryStreamEncryptionStatus = "ENABLING_FAILED" + DeliveryStreamEncryptionStatusDisabled DeliveryStreamEncryptionStatus = "DISABLED" + DeliveryStreamEncryptionStatusDisabling DeliveryStreamEncryptionStatus = "DISABLING" + DeliveryStreamEncryptionStatusDisablingFailed DeliveryStreamEncryptionStatus = "DISABLING_FAILED" ) func (enum DeliveryStreamEncryptionStatus) MarshalValue() (string, error) { @@ -40,13 +42,38 @@ func (enum DeliveryStreamEncryptionStatus) MarshalValueBuf(b []byte) ([]byte, er return append(b, enum...), nil } +type DeliveryStreamFailureType string + +// Enum values for DeliveryStreamFailureType +const ( + DeliveryStreamFailureTypeRetireKmsGrantFailed DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" + DeliveryStreamFailureTypeCreateKmsGrantFailed DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" + DeliveryStreamFailureTypeKmsAccessDenied DeliveryStreamFailureType = "KMS_ACCESS_DENIED" + DeliveryStreamFailureTypeDisabledKmsKey DeliveryStreamFailureType = "DISABLED_KMS_KEY" + DeliveryStreamFailureTypeInvalidKmsKey DeliveryStreamFailureType = "INVALID_KMS_KEY" + DeliveryStreamFailureTypeKmsKeyNotFound DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" + DeliveryStreamFailureTypeKmsOptInRequired DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" + DeliveryStreamFailureTypeUnknownError DeliveryStreamFailureType = "UNKNOWN_ERROR" +) + +func (enum DeliveryStreamFailureType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeliveryStreamFailureType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type DeliveryStreamStatus string // Enum values for DeliveryStreamStatus const ( - DeliveryStreamStatusCreating DeliveryStreamStatus = "CREATING" - DeliveryStreamStatusDeleting DeliveryStreamStatus = "DELETING" - DeliveryStreamStatusActive DeliveryStreamStatus = "ACTIVE" + DeliveryStreamStatusCreating DeliveryStreamStatus = "CREATING" + DeliveryStreamStatusCreatingFailed DeliveryStreamStatus = "CREATING_FAILED" + DeliveryStreamStatusDeleting DeliveryStreamStatus = "DELETING" + DeliveryStreamStatusDeletingFailed DeliveryStreamStatus = "DELETING_FAILED" + DeliveryStreamStatusActive DeliveryStreamStatus = "ACTIVE" ) func (enum DeliveryStreamStatus) MarshalValue() (string, error) { @@ -129,6 +156,23 @@ func (enum HECEndpointType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type KeyType string + +// Enum values for KeyType +const ( + KeyTypeAwsOwnedCmk KeyType = "AWS_OWNED_CMK" + KeyTypeCustomerManagedCmk KeyType = "CUSTOMER_MANAGED_CMK" +) + +func (enum KeyType) MarshalValue() (string, error) 
{ + return string(enum), nil +} + +func (enum KeyType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type NoEncryptionConfig string // Enum values for NoEncryptionConfig diff --git a/service/firehose/api_errors.go b/service/firehose/api_errors.go index 762ed0eb390..e7d134f64e9 100644 --- a/service/firehose/api_errors.go +++ b/service/firehose/api_errors.go @@ -17,6 +17,15 @@ const ( // The specified input parameter has a value that is not valid. ErrCodeInvalidArgumentException = "InvalidArgumentException" + // ErrCodeInvalidKMSResourceException for service response error code + // "InvalidKMSResourceException". + // + // Kinesis Data Firehose throws this exception when an attempt to put records + // or to start or stop delivery stream encryption fails. This happens when the + // KMS service throws one of the following exception types: AccessDeniedException, + // InvalidStateException, DisabledException, or NotFoundException. + ErrCodeInvalidKMSResourceException = "InvalidKMSResourceException" + // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // diff --git a/service/firehose/api_op_CreateDeliveryStream.go b/service/firehose/api_op_CreateDeliveryStream.go index 0232edf716e..b4b01820c33 100644 --- a/service/firehose/api_op_CreateDeliveryStream.go +++ b/service/firehose/api_op_CreateDeliveryStream.go @@ -13,6 +13,10 @@ import ( type CreateDeliveryStreamInput struct { _ struct{} `type:"structure"` + // Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed + // for Server-Side Encryption (SSE). + DeliveryStreamEncryptionConfigurationInput *DeliveryStreamEncryptionConfigurationInput `type:"structure"` + // The name of the delivery stream. This name must be unique per AWS account // in the same AWS Region. If the delivery streams are in different accounts // or different Regions, you can have multiple delivery streams with the same @@ -78,6 +82,11 @@ func (s *CreateDeliveryStreamInput) Validate() error { if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) } + if s.DeliveryStreamEncryptionConfigurationInput != nil { + if err := s.DeliveryStreamEncryptionConfigurationInput.Validate(); err != nil { + invalidParams.AddNested("DeliveryStreamEncryptionConfigurationInput", err.(aws.ErrInvalidParams)) + } + } if s.ElasticsearchDestinationConfiguration != nil { if err := s.ElasticsearchDestinationConfiguration.Validate(); err != nil { invalidParams.AddNested("ElasticsearchDestinationConfiguration", err.(aws.ErrInvalidParams)) @@ -145,9 +154,14 @@ const opCreateDeliveryStream = "CreateDeliveryStream" // // This is an asynchronous operation that immediately returns. The initial status // of the delivery stream is CREATING. After the delivery stream is created, -// its status is ACTIVE and it now accepts data. Attempts to send data to a -// delivery stream that is not in the ACTIVE state cause an exception. To check -// the state of a delivery stream, use DescribeDeliveryStream. +// its status is ACTIVE and it now accepts data. If the delivery stream creation +// fails, the status transitions to CREATING_FAILED. Attempts to send data to +// a delivery stream that is not in the ACTIVE state cause an exception. To +// check the state of a delivery stream, use DescribeDeliveryStream. +// +// If the status of a delivery stream is CREATING_FAILED, this status doesn't +// change, and you can't invoke CreateDeliveryStream again on it. 
However, you +// can invoke the DeleteDeliveryStream operation to delete it. // // A Kinesis Data Firehose delivery stream can be configured to receive records // directly from providers using PutRecord or PutRecordBatch, or it can be configured @@ -156,6 +170,11 @@ const opCreateDeliveryStream = "CreateDeliveryStream" // and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in // the KinesisStreamSourceConfiguration parameter. // +// To create a delivery stream with server-side encryption (SSE) enabled, include +// DeliveryStreamEncryptionConfigurationInput in your request. This is optional. +// You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing +// delivery stream that doesn't have SSE enabled. +// // A delivery stream is configured with a single destination: Amazon S3, Amazon // ES, Amazon Redshift, or Splunk. You must specify only one of the following // destination configuration parameters: ExtendedS3DestinationConfiguration, diff --git a/service/firehose/api_op_DeleteDeliveryStream.go b/service/firehose/api_op_DeleteDeliveryStream.go index b96293ce4e5..35e359a609d 100644 --- a/service/firehose/api_op_DeleteDeliveryStream.go +++ b/service/firehose/api_op_DeleteDeliveryStream.go @@ -12,6 +12,18 @@ import ( type DeleteDeliveryStreamInput struct { _ struct{} `type:"structure"` + // Set this to true if you want to delete the delivery stream even if Kinesis + // Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose + // might be unable to retire the grant due to a customer error, such as when + // the CMK or the grant are in an invalid state. If you force deletion, you + // can then use the RevokeGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) + // operation to revoke the grant you gave to Kinesis Data Firehose. If a failure + // to retire the grant happens due to an AWS KMS issue, Kinesis Data Firehose + // keeps retrying the delete operation. + // + // The default value is false. + AllowForceDelete *bool `type:"boolean"` + // The name of the delivery stream. // // DeliveryStreamName is a required field @@ -56,16 +68,16 @@ const opDeleteDeliveryStream = "DeleteDeliveryStream" // // Deletes a delivery stream and its data. // -// You can delete a delivery stream only if it is in ACTIVE or DELETING state, -// and not in the CREATING state. While the deletion request is in process, -// the delivery stream is in the DELETING state. -// -// To check the state of a delivery stream, use DescribeDeliveryStream. +// To check the state of a delivery stream, use DescribeDeliveryStream. You +// can delete a delivery stream only if it is in one of the following states: +// ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a +// delivery stream that is in the CREATING state. While the deletion request +// is in process, the delivery stream is in the DELETING state. // -// While the delivery stream is DELETING state, the service might continue to -// accept the records, but it doesn't make any guarantees with respect to delivering -// the data. Therefore, as a best practice, you should first stop any applications -// that are sending records before deleting a delivery stream. +// While the delivery stream is in the DELETING state, the service might continue +// to accept records, but it doesn't make any guarantees with respect to delivering +// the data. Therefore, as a best practice, first stop any applications that +// are sending records before you delete a delivery stream. 
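A sketch of the force-delete path described above, for a stream whose KMS grant retirement failed; the stream name is a placeholder.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/firehose"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config, %v", err)
	}
	client := firehose.New(cfg)
	name := aws.String("my-delivery-stream") // placeholder

	desc, err := client.DescribeDeliveryStreamRequest(&firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: name,
	}).Send(context.TODO())
	if err != nil {
		log.Fatalf("failed to describe delivery stream, %v", err)
	}

	// If a previous delete could not retire the KMS grant, retry with
	// AllowForceDelete and revoke the grant manually afterwards if needed.
	if desc.DeliveryStreamDescription.DeliveryStreamStatus == firehose.DeliveryStreamStatusDeletingFailed {
		_, err = client.DeleteDeliveryStreamRequest(&firehose.DeleteDeliveryStreamInput{
			DeliveryStreamName: name,
			AllowForceDelete:   aws.Bool(true),
		}).Send(context.TODO())
		if err != nil {
			log.Fatalf("failed to force-delete delivery stream, %v", err)
		}
	}
}
```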
// // // Example sending a request using DeleteDeliveryStreamRequest. // req := client.DeleteDeliveryStreamRequest(params) diff --git a/service/firehose/api_op_DescribeDeliveryStream.go b/service/firehose/api_op_DescribeDeliveryStream.go index a2e871e5779..b95657aef91 100644 --- a/service/firehose/api_op_DescribeDeliveryStream.go +++ b/service/firehose/api_op_DescribeDeliveryStream.go @@ -73,10 +73,16 @@ const opDescribeDeliveryStream = "DescribeDeliveryStream" // DescribeDeliveryStreamRequest returns a request value for making API operation for // Amazon Kinesis Firehose. // -// Describes the specified delivery stream and gets the status. For example, -// after your delivery stream is created, call DescribeDeliveryStream to see -// whether the delivery stream is ACTIVE and therefore ready for data to be -// sent to it. +// Describes the specified delivery stream and its status. For example, after +// your delivery stream is created, call DescribeDeliveryStream to see whether +// the delivery stream is ACTIVE and therefore ready for data to be sent to +// it. +// +// If the status of a delivery stream is CREATING_FAILED, this status doesn't +// change, and you can't invoke CreateDeliveryStream again on it. However, you +// can invoke the DeleteDeliveryStream operation to delete it. If the status +// is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream +// again but with DeleteDeliveryStreamInput$AllowForceDelete set to true. // // // Example sending a request using DescribeDeliveryStreamRequest. // req := client.DescribeDeliveryStreamRequest(params) diff --git a/service/firehose/api_op_StartDeliveryStreamEncryption.go b/service/firehose/api_op_StartDeliveryStreamEncryption.go index e18f26b973e..5ca238273b4 100644 --- a/service/firehose/api_op_StartDeliveryStreamEncryption.go +++ b/service/firehose/api_op_StartDeliveryStreamEncryption.go @@ -12,6 +12,10 @@ import ( type StartDeliveryStreamEncryptionInput struct { _ struct{} `type:"structure"` + // Used to specify the type and Amazon Resource Name (ARN) of the KMS key needed + // for Server-Side Encryption (SSE). + DeliveryStreamEncryptionConfigurationInput *DeliveryStreamEncryptionConfigurationInput `type:"structure"` + // The name of the delivery stream for which you want to enable server-side // encryption (SSE). // @@ -34,6 +38,11 @@ func (s *StartDeliveryStreamEncryptionInput) Validate() error { if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("DeliveryStreamName", 1)) } + if s.DeliveryStreamEncryptionConfigurationInput != nil { + if err := s.DeliveryStreamEncryptionConfigurationInput.Validate(); err != nil { + invalidParams.AddNested("DeliveryStreamEncryptionConfigurationInput", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -58,15 +67,32 @@ const opStartDeliveryStreamEncryption = "StartDeliveryStreamEncryption" // Enables server-side encryption (SSE) for the delivery stream. // // This operation is asynchronous. It returns immediately. When you invoke it, -// Kinesis Data Firehose first sets the status of the stream to ENABLING, and -// then to ENABLED. You can continue to read and write data to your stream while -// its status is ENABLING, but the data is not encrypted. It can take up to -// 5 seconds after the encryption status changes to ENABLED before all records -// written to the delivery stream are encrypted. 
To find out whether a record -// or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted -// and PutRecordBatchOutput$Encrypted, respectively. +// Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, +// and then to ENABLED. The encryption status of a delivery stream is the Status +// property in DeliveryStreamEncryptionConfiguration. If the operation fails, +// the encryption status changes to ENABLING_FAILED. You can continue to read +// and write data to your delivery stream while the encryption status is ENABLING, +// but the data is not encrypted. It can take up to 5 seconds after the encryption +// status changes to ENABLED before all records written to the delivery stream +// are encrypted. To find out whether a record or a batch of records was encrypted, +// check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, +// respectively. +// +// To check the encryption status of a delivery stream, use DescribeDeliveryStream. +// +// Even if encryption is currently enabled for a delivery stream, you can still +// invoke this operation on it to change the ARN of the CMK or both its type +// and ARN. In this case, Kinesis Data Firehose schedules the grant it had on +// the old CMK for retirement and creates a grant that enables it to use the +// new CMK to encrypt and decrypt data and to manage the grant. +// +// If a delivery stream already has encryption enabled and then you invoke this +// operation to change the ARN of the CMK or both its type and ARN and you get +// ENABLING_FAILED, this only means that the attempt to change the CMK failed. +// In this case, encryption remains enabled with the old CMK. // -// To check the encryption state of a delivery stream, use DescribeDeliveryStream. +// If the encryption status of your delivery stream is ENABLING_FAILED, you +// can invoke this operation again. // // You can only enable SSE for a delivery stream that uses DirectPut as its // source. diff --git a/service/firehose/api_op_StopDeliveryStreamEncryption.go b/service/firehose/api_op_StopDeliveryStreamEncryption.go index a5d30b16d6a..c140aea8de8 100644 --- a/service/firehose/api_op_StopDeliveryStreamEncryption.go +++ b/service/firehose/api_op_StopDeliveryStreamEncryption.go @@ -58,8 +58,8 @@ const opStopDeliveryStreamEncryption = "StopDeliveryStreamEncryption" // Disables server-side encryption (SSE) for the delivery stream. // // This operation is asynchronous. It returns immediately. When you invoke it, -// Kinesis Data Firehose first sets the status of the stream to DISABLING, and -// then to DISABLED. You can continue to read and write data to your stream +// Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, +// and then to DISABLED. You can continue to read and write data to your stream // while its status is DISABLING. It can take up to 5 seconds after the encryption // status changes to DISABLED before all records written to the delivery stream // are no longer subject to encryption. To find out whether a record or a batch @@ -68,6 +68,11 @@ const opStopDeliveryStreamEncryption = "StopDeliveryStreamEncryption" // // To check the encryption state of a delivery stream, use DescribeDeliveryStream. 
// +// If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, +// Kinesis Data Firehose schedules the related KMS grant for retirement and +// then retires it after it ensures that it is finished delivering records to +// the destination. +// // The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations // have a combined limit of 25 calls per delivery stream per 24 hours. For example, // you reach the limit if you call StartDeliveryStreamEncryption 13 times and diff --git a/service/firehose/api_types.go b/service/firehose/api_types.go index 05fc53d3e24..60180cd95c3 100644 --- a/service/firehose/api_types.go +++ b/service/firehose/api_types.go @@ -204,7 +204,10 @@ type DeliveryStreamDescription struct { // DeliveryStreamName is a required field DeliveryStreamName *string `min:"1" type:"string" required:"true"` - // The status of the delivery stream. + // The status of the delivery stream. If the status of a delivery stream is + // CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream + // again on it. However, you can invoke the DeleteDeliveryStream operation to + // delete it. // // DeliveryStreamStatus is a required field DeliveryStreamStatus DeliveryStreamStatus `type:"string" required:"true" enum:"true"` @@ -224,6 +227,11 @@ type DeliveryStreamDescription struct { // Destinations is a required field Destinations []DestinationDescription `type:"list" required:"true"` + // Provides details in case one of the following operations fails due to an + // error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, + // StopDeliveryStreamEncryption. + FailureDescription *FailureDescription `type:"structure"` + // Indicates whether there are more destinations available to list. // // HasMoreDestinations is a required field @@ -250,12 +258,32 @@ func (s DeliveryStreamDescription) String() string { return awsutil.Prettify(s) } -// Indicates the server-side encryption (SSE) status for the delivery stream. +// Contains information about the server-side encryption (SSE) status for the +// delivery stream, the type customer master key (CMK) in use, if any, and the +// ARN of the CMK. You can get DeliveryStreamEncryptionConfiguration by invoking +// the DescribeDeliveryStream operation. type DeliveryStreamEncryptionConfiguration struct { _ struct{} `type:"structure"` + // Provides details in case one of the following operations fails due to an + // error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, + // StopDeliveryStreamEncryption. + FailureDescription *FailureDescription `type:"structure"` + + // If KeyType is CUSTOMER_MANAGED_CMK, this field contains the ARN of the customer + // managed CMK. If KeyType is AWS_OWNED_CMK, DeliveryStreamEncryptionConfiguration + // doesn't contain a value for KeyARN. + KeyARN *string `min:"1" type:"string"` + + // Indicates the type of customer master key (CMK) that is used for encryption. + // The default setting is AWS_OWNED_CMK. For more information about CMKs, see + // Customer Master Keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). + KeyType KeyType `type:"string" enum:"true"` + + // This is the server-side encryption (SSE) status for the delivery stream. // For a full description of the different values of this status, see StartDeliveryStreamEncryption - // and StopDeliveryStreamEncryption. + // and StopDeliveryStreamEncryption. 
If this status is ENABLING_FAILED or DISABLING_FAILED, + // it is the status of the most recent attempt to enable or disable SSE, respectively. Status DeliveryStreamEncryptionStatus `type:"string" enum:"true"` } @@ -264,6 +292,55 @@ func (s DeliveryStreamEncryptionConfiguration) String() string { return awsutil.Prettify(s) } +// Used to specify the type and Amazon Resource Name (ARN) of the CMK needed +// for Server-Side Encryption (SSE). +type DeliveryStreamEncryptionConfigurationInput struct { + _ struct{} `type:"structure"` + + // If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource + // Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Kinesis Data + // Firehose uses a service-account CMK. + KeyARN *string `min:"1" type:"string"` + + // Indicates the type of customer master key (CMK) to use for encryption. The + // default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer + // Master Keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). + // When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with + // KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon + // KMS operation CreateGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html) + // to create a grant that allows the Kinesis Data Firehose service to use the + // customer managed CMK to perform encryption and decryption. Kinesis Data Firehose + // manages that grant. + // + // When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery + // stream that is already encrypted with a customer managed CMK, Kinesis Data + // Firehose schedules the grant it had on the old CMK for retirement. + // + // KeyType is a required field + KeyType KeyType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s DeliveryStreamEncryptionConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeliveryStreamEncryptionConfigurationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeliveryStreamEncryptionConfigurationInput"} + if s.KeyARN != nil && len(*s.KeyARN) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("KeyARN", 1)) + } + if len(s.KeyType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("KeyType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The deserializer you want Kinesis Data Firehose to use for converting the // input data from JSON. Kinesis Data Firehose then serializes the data to its // final format using the Serializer. Kinesis Data Firehose supports two types @@ -964,6 +1041,28 @@ func (s *ExtendedS3DestinationUpdate) Validate() error { return nil } +// Provides details in case one of the following operations fails due to an +// error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, +// StopDeliveryStreamEncryption. +type FailureDescription struct { + _ struct{} `type:"structure"` + + // A message providing details about the error that caused the failure. + // + // Details is a required field + Details *string `type:"string" required:"true"` + + // The type of error that caused the failure. 
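A sketch of enabling SSE with a customer managed CMK using the new DeliveryStreamEncryptionConfigurationInput type above; the stream name and key ARN are placeholders. The same structure can be passed in CreateDeliveryStreamInput to create a stream with SSE already enabled.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/firehose"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config, %v", err)
	}
	client := firehose.New(cfg)

	// Switch the stream from the AWS-owned CMK to a customer managed CMK.
	// Kinesis Data Firehose creates a grant on the key and manages it.
	req := client.StartDeliveryStreamEncryptionRequest(&firehose.StartDeliveryStreamEncryptionInput{
		DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder
		DeliveryStreamEncryptionConfigurationInput: &firehose.DeliveryStreamEncryptionConfigurationInput{
			KeyType: firehose.KeyTypeCustomerManagedCmk,
			KeyARN:  aws.String("arn:aws:kms:us-west-2:123456789012:key/11111111-2222-3333-4444-555555555555"), // placeholder
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatalf("failed to start delivery stream encryption, %v", err)
	}
	// Poll DescribeDeliveryStream until the encryption status reaches ENABLED
	// (or ENABLING_FAILED, in which case the old CMK stays in effect).
}
```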
+ // + // Type is a required field + Type DeliveryStreamFailureType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s FailureDescription) String() string { + return awsutil.Prettify(s) +} + // The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing // data, which means converting it from the JSON format in preparation for serializing // it to the Parquet or ORC format. This is one of two deserializers you can @@ -1270,7 +1369,7 @@ type ParquetSerDe struct { // The compression code to use over data blocks. The possible values are UNCOMPRESSED, // SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression - // speed. Use GZIP if the compression ration is more important than speed. + // speed. Use GZIP if the compression ratio is more important than speed. Compression ParquetCompression `type:"string" enum:"true"` // Indicates whether to enable dictionary compression. diff --git a/service/fsx/api_enums.go b/service/fsx/api_enums.go index ccfc84027a4..c7ec43887a7 100644 --- a/service/fsx/api_enums.go +++ b/service/fsx/api_enums.go @@ -163,3 +163,20 @@ func (enum ServiceLimit) MarshalValueBuf(b []byte) ([]byte, error) { b = b[0:0] return append(b, enum...), nil } + +type WindowsDeploymentType string + +// Enum values for WindowsDeploymentType +const ( + WindowsDeploymentTypeMultiAz1 WindowsDeploymentType = "MULTI_AZ_1" + WindowsDeploymentTypeSingleAz1 WindowsDeploymentType = "SINGLE_AZ_1" +) + +func (enum WindowsDeploymentType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum WindowsDeploymentType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/fsx/api_op_CreateFileSystem.go b/service/fsx/api_op_CreateFileSystem.go index 7a741df8356..e33e90dd33c 100644 --- a/service/fsx/api_op_CreateFileSystem.go +++ b/service/fsx/api_op_CreateFileSystem.go @@ -41,18 +41,23 @@ type CreateFileSystemInput struct { // The storage capacity of the file system being created. // - // For Windows file systems, the storage capacity has a minimum of 300 GiB, - // and a maximum of 65,536 GiB. + // For Windows file systems, valid values are 32 GiB - 65,536 GiB. // - // For Lustre file systems, the storage capacity has a minimum of 3,600 GiB. - // Storage capacity is provisioned in increments of 3,600 GiB. + // For Lustre file systems, valid values are 1,200, 2,400, 3,600, then continuing + // in increments of 3600 GiB. // // StorageCapacity is a required field StorageCapacity *int64 `min:"1" type:"integer" required:"true"` - // The IDs of the subnets that the file system will be accessible from. File - // systems support only one subnet. The file server is also launched in that - // subnet's Availability Zone. + // Specifies the IDs of the subnets that the file system will be accessible + // from. For Windows MULTI_AZ_1 file system deployment types, provide exactly + // two subnet IDs, one for the preferred file server and one for the standby + // file server. You specify one of these subnets as the preferred subnet using + // the WindowsConfiguration > PreferredSubnetID property. + // + // For Windows SINGLE_AZ_1 file system deployment types and Lustre file systems, + // provide exactly one subnet ID. The file server is launched in that subnet's + // Availability Zone.
// // SubnetIds is a required field SubnetIds []string `type:"list" required:"true"` diff --git a/service/fsx/api_types.go b/service/fsx/api_types.go index a60b2191f0b..0ff259b02b8 100644 --- a/service/fsx/api_types.go +++ b/service/fsx/api_types.go @@ -196,6 +196,27 @@ type CreateFileSystemWindowsConfiguration struct { // UTC time zone. DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + // Specifies the file system deployment type, valid values are the following: + // + // * MULTI_AZ_1 - Deploys a high availability file system that is configured + // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. + // You can only deploy a Multi-AZ file system in AWS Regions that have a + // minimum of three Availability Zones. + // + // * SINGLE_AZ_1 - (Default) Choose to deploy a file system that is configured + // for single AZ redundancy. + // + // To learn more about high availability Multi-AZ file systems, see High Availability + // for Amazon FSx for Windows File Server (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). + DeploymentType WindowsDeploymentType `type:"string" enum:"true"` + + // Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet + // in which you want the preferred file server to be located. For in-AWS applications, + // we recommend that you launch your clients in the same Availability Zone (AZ) + // as your preferred file server to reduce cross-AZ data transfer costs and + // minimize latency. + PreferredSubnetId *string `min:"15" type:"string"` + // The configuration that Amazon FSx uses to join the Windows File Server instance // to your self-managed (including on-premises) Microsoft Active Directory (AD) // directory. @@ -226,6 +247,9 @@ func (s *CreateFileSystemWindowsConfiguration) Validate() error { if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { invalidParams.Add(aws.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) } + if s.PreferredSubnetId != nil && len(*s.PreferredSubnetId) < 15 { + invalidParams.Add(aws.NewErrParamMinLen("PreferredSubnetId", 15)) + } if s.ThroughputCapacity == nil { invalidParams.Add(aws.NewErrParamRequired("ThroughputCapacity")) @@ -361,18 +385,19 @@ type FileSystem struct { // file system's data for an Amazon FSx for Windows File Server file system. KmsKeyId *string `min:"1" type:"string"` - // The lifecycle status of the file system: + // The lifecycle status of the file system, following are the possible values + // and what they mean: // - // * AVAILABLE indicates that the file system is reachable and available - // for use. + // * AVAILABLE - The file system is in a healthy state, and is reachable + // and available for use. // - // * CREATING indicates that Amazon FSx is in the process of creating the - // new file system. + // * CREATING - Amazon FSx is creating the new file system. // - // * DELETING indicates that Amazon FSx is in the process of deleting the - // file system. + // * DELETING - Amazon FSx is deleting an existing file system. // - // * FAILED indicates that Amazon FSx was not able to create the file system. + // * FAILED - An existing file system has experienced an unrecoverable failure. + // When creating a new file system, Amazon FSx was unable to create the file + // system. // // * MISCONFIGURED indicates that the file system is in a failed but recoverable // state. 
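A sketch of creating a MULTI_AZ_1 Windows file system with the new DeploymentType and PreferredSubnetId fields documented above; the subnet and directory IDs are placeholders, and ActiveDirectoryId is the pre-existing way to join an AWS Managed Microsoft AD.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load AWS config, %v", err)
	}
	client := fsx.New(cfg)

	// MULTI_AZ_1 requires exactly two subnet IDs; the preferred subnet hosts
	// the primary file server and the other hosts the standby.
	req := client.CreateFileSystemRequest(&fsx.CreateFileSystemInput{
		FileSystemType:  fsx.FileSystemTypeWindows,
		StorageCapacity: aws.Int64(300),
		SubnetIds:       []string{"subnet-1111111111111111a", "subnet-2222222222222222b"}, // placeholders
		WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{
			DeploymentType:     fsx.WindowsDeploymentTypeMultiAz1,
			PreferredSubnetId:  aws.String("subnet-1111111111111111a"), // placeholder
			ThroughputCapacity: aws.Int64(32),
			ActiveDirectoryId:  aws.String("d-1234567890"), // placeholder AWS Managed AD ID
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatalf("failed to create file system, %v", err)
	}
	log.Println("file system creation initiated")
}
```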
@@ -537,8 +562,9 @@ type SelfManagedActiveDirectoryConfiguration struct { // (Optional) The name of the domain group whose members are granted administrative // privileges for the file system. Administrative privileges include taking - // ownership of files and folders, and setting audit controls (audit ACLs) on - // files and folders. The group that you specify must already exist in your + // ownership of files and folders, setting audit controls (audit ACLs) on files + // and folders, and administering the file system remotely by using the FSx + // Remote PowerShell. The group that you specify must already exist in your // domain. If you don't provide one, your AD domain's Domain Admins group is // used. FileSystemAdministratorsGroup *string `min:"1" type:"string"` @@ -790,9 +816,48 @@ type WindowsFileSystemConfiguration struct { // The preferred time to take daily automatic backups, in the UTC time zone. DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + // Specifies the file system deployment type, valid values are the following: + // + // * MULTI_AZ_1 - Specifies a high availability file system that is configured + // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. + // + // * SINGLE_AZ_1 - (Default) Specifies a file system that is configured for + // single AZ redundancy. + DeploymentType WindowsDeploymentType `type:"string" enum:"true"` + // The list of maintenance operations in progress for this file system. MaintenanceOperationsInProgress []FileSystemMaintenanceOperation `type:"list"` + // For MULTI_AZ_1 deployment types, the IP address of the primary, or preferred, + // file server. + // + // Use this IP address when mounting the file system on Linux SMB clients or + // Windows SMB clients that are not joined to a Microsoft Active Directory. + // Applicable for both SINGLE_AZ_1 and MULTI_AZ_1 deployment types. This IP + // address is temporarily unavailable when the file system is undergoing maintenance. + // For Linux and Windows SMB clients that are joined to an Active Directory, + // use the file system's DNSName instead. For more information and instruction + // on mapping and mounting file shares, see https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html + // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html). + PreferredFileServerIp *string `type:"string"` + + // For MULTI_AZ_1 deployment types, it specifies the ID of the subnet where + // the preferred file server is located. Must be one of the two subnet IDs specified + // in SubnetIds property. Amazon FSx serves traffic from this subnet except + // in the event of a failover to the secondary file server. + // + // For SINGLE_AZ_1 deployment types, this value is the same as that for SubnetIDs. + PreferredSubnetId *string `min:"15" type:"string"` + + // For MULTI_AZ_1 deployment types, use this endpoint when performing administrative + // tasks on the file system using Amazon FSx Remote PowerShell. + // + // For SINGLE_AZ_1 deployment types, this is the DNS name of the file system. + // + // This endpoint is temporarily unavailable when the file system is undergoing + // maintenance. + RemoteAdministrationEndpoint *string `min:"16" type:"string"` + // The configuration of the self-managed Microsoft Active Directory (AD) directory // to which the Windows File Server instance is joined. 
SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryAttributes `type:"structure"`
diff --git a/service/guardduty/api_enums.go b/service/guardduty/api_enums.go
index 0fe774361cf..1908c08061f 100644
--- a/service/guardduty/api_enums.go
+++ b/service/guardduty/api_enums.go
@@ -2,6 +2,22 @@
 package guardduty

+type DestinationType string
+
+// Enum values for DestinationType
+const (
+ DestinationTypeS3 DestinationType = "S3"
+)
+
+func (enum DestinationType) MarshalValue() (string, error) {
+ return string(enum), nil
+}
+
+func (enum DestinationType) MarshalValueBuf(b []byte) ([]byte, error) {
+ b = b[0:0]
+ return append(b, enum...), nil
+}
+
 type DetectorStatus string

 // Enum values for DetectorStatus
@@ -147,6 +163,25 @@ func (enum OrderBy) MarshalValueBuf(b []byte) ([]byte, error) {
 return append(b, enum...), nil
 }

+type PublishingStatus string
+
+// Enum values for PublishingStatus
+const (
+ PublishingStatusPendingVerification PublishingStatus = "PENDING_VERIFICATION"
+ PublishingStatusPublishing PublishingStatus = "PUBLISHING"
+ PublishingStatusUnableToPublishFixDestinationProperty PublishingStatus = "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY"
+ PublishingStatusStopped PublishingStatus = "STOPPED"
+)
+
+func (enum PublishingStatus) MarshalValue() (string, error) {
+ return string(enum), nil
+}
+
+func (enum PublishingStatus) MarshalValueBuf(b []byte) ([]byte, error) {
+ b = b[0:0]
+ return append(b, enum...), nil
+}
+
 type ThreatIntelSetFormat string

 // Enum values for ThreatIntelSetFormat
diff --git a/service/guardduty/api_op_CreateIPSet.go b/service/guardduty/api_op_CreateIPSet.go
index cfef92f0f30..aaf0ef17792 100644
--- a/service/guardduty/api_op_CreateIPSet.go
+++ b/service/guardduty/api_op_CreateIPSet.go
@@ -186,8 +186,11 @@ const opCreateIPSet = "CreateIPSet"
 // CreateIPSetRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Creates a new IPSet - a list of trusted IP addresses that have been whitelisted
-// for secure communication with AWS infrastructure and applications.
+// Creates a new IPSet, called Trusted IP list in the console user interface.
+// An IPSet is a list of IP addresses trusted for secure communication with AWS
+// infrastructure and applications. GuardDuty does not generate findings for
+// IP addresses included in IPSets. Only users from the master account can use
+// this operation.
 //
 // // Example sending a request using CreateIPSetRequest.
 // req := client.CreateIPSetRequest(params)
diff --git a/service/guardduty/api_op_CreatePublishingDestination.go b/service/guardduty/api_op_CreatePublishingDestination.go
new file mode 100644
index 00000000000..84406e75f0a
--- /dev/null
+++ b/service/guardduty/api_op_CreatePublishingDestination.go
@@ -0,0 +1,195 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package guardduty
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+type CreatePublishingDestinationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The idempotency token for the request.
+ ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"`
+
+ // Properties of the publishing destination, including the ARNs for the destination
+ // and the KMS key used for encryption.
+ // + // DestinationProperties is a required field + DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure" required:"true"` + + // The type of resource for the publishing destination. Currently only S3 is + // supported. + // + // DestinationType is a required field + DestinationType DestinationType `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"true"` + + // The ID of the GuardDuty detector associated with the publishing destination. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePublishingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePublishingDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreatePublishingDestinationInput"} + + if s.DestinationProperties == nil { + invalidParams.Add(aws.NewErrParamRequired("DestinationProperties")) + } + if len(s.DestinationType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("DestinationType")) + } + + if s.DetectorId == nil { + invalidParams.Add(aws.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreatePublishingDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + var ClientToken string + if s.ClientToken != nil { + ClientToken = *s.ClientToken + } else { + ClientToken = protocol.GetIdempotencyToken() + } + { + v := ClientToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "clientToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DestinationProperties != nil { + v := s.DestinationProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "destinationProperties", v, metadata) + } + if len(s.DestinationType) > 0 { + v := s.DestinationType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.DetectorId != nil { + v := *s.DetectorId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreatePublishingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the publishing destination created. + // + // DestinationId is a required field + DestinationId *string `locationName:"destinationId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePublishingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreatePublishingDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opCreatePublishingDestination = "CreatePublishingDestination" + +// CreatePublishingDestinationRequest returns a request value for making API operation for +// Amazon GuardDuty. +// +// Creates a publishing destination to send findings to. The resource to send +// findings to must exist before you use this operation. +// +// // Example sending a request using CreatePublishingDestinationRequest. +// req := client.CreatePublishingDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreatePublishingDestination +func (c *Client) CreatePublishingDestinationRequest(input *CreatePublishingDestinationInput) CreatePublishingDestinationRequest { + op := &aws.Operation{ + Name: opCreatePublishingDestination, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/publishingDestination", + } + + if input == nil { + input = &CreatePublishingDestinationInput{} + } + + req := c.newRequest(op, input, &CreatePublishingDestinationOutput{}) + return CreatePublishingDestinationRequest{Request: req, Input: input, Copy: c.CreatePublishingDestinationRequest} +} + +// CreatePublishingDestinationRequest is the request type for the +// CreatePublishingDestination API operation. +type CreatePublishingDestinationRequest struct { + *aws.Request + Input *CreatePublishingDestinationInput + Copy func(*CreatePublishingDestinationInput) CreatePublishingDestinationRequest +} + +// Send marshals and sends the CreatePublishingDestination API request. +func (r CreatePublishingDestinationRequest) Send(ctx context.Context) (*CreatePublishingDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreatePublishingDestinationResponse{ + CreatePublishingDestinationOutput: r.Request.Data.(*CreatePublishingDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreatePublishingDestinationResponse is the response type for the +// CreatePublishingDestination API operation. +type CreatePublishingDestinationResponse struct { + *CreatePublishingDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreatePublishingDestination request. +func (r *CreatePublishingDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/guardduty/api_op_CreateSampleFindings.go b/service/guardduty/api_op_CreateSampleFindings.go index f4aa02a5113..3f16209470f 100644 --- a/service/guardduty/api_op_CreateSampleFindings.go +++ b/service/guardduty/api_op_CreateSampleFindings.go @@ -18,7 +18,7 @@ type CreateSampleFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // Types of sample findings that you want to generate. + // Types of sample findings to generate. 
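// Illustrative usage sketch (hand-written, not generated code): exporting findings to S3
// with the CreatePublishingDestination operation added above. The detector ID, bucket ARN,
// and KMS key ARN are placeholders; the DestinationProperties field names (DestinationArn,
// KmsKeyArn) are taken from the doc comments in this diff.
//
//     func createS3Destination(ctx context.Context, client *guardduty.Client) (string, error) {
//         req := client.CreatePublishingDestinationRequest(&guardduty.CreatePublishingDestinationInput{
//             DetectorId:      aws.String("12abc34d567e8fa901bc2d34e56789f0"), // placeholder
//             DestinationType: guardduty.DestinationTypeS3,
//             DestinationProperties: &guardduty.DestinationProperties{
//                 DestinationArn: aws.String("arn:aws:s3:::example-findings-bucket"),                  // placeholder
//                 KmsKeyArn:      aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"), // placeholder
//             },
//         })
//         resp, err := req.Send(ctx)
//         if err != nil {
//             return "", err
//         }
//         return aws.StringValue(resp.DestinationId), nil
//     }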
FindingTypes []string `locationName:"findingTypes" type:"list"` } diff --git a/service/guardduty/api_op_CreateThreatIntelSet.go b/service/guardduty/api_op_CreateThreatIntelSet.go index f970ee51b17..9c2f8fbaaa6 100644 --- a/service/guardduty/api_op_CreateThreatIntelSet.go +++ b/service/guardduty/api_op_CreateThreatIntelSet.go @@ -186,7 +186,8 @@ const opCreateThreatIntelSet = "CreateThreatIntelSet" // Amazon GuardDuty. // // Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP -// addresses. GuardDuty generates findings based on ThreatIntelSets. +// addresses. GuardDuty generates findings based on ThreatIntelSets. Only users +// of the master account can use this operation. // // // Example sending a request using CreateThreatIntelSetRequest. // req := client.CreateThreatIntelSetRequest(params) diff --git a/service/guardduty/api_op_DeleteIPSet.go b/service/guardduty/api_op_DeleteIPSet.go index b04b4d48c0a..d02fc07313b 100644 --- a/service/guardduty/api_op_DeleteIPSet.go +++ b/service/guardduty/api_op_DeleteIPSet.go @@ -13,12 +13,12 @@ import ( type DeleteIPSetInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the ipSet is associated with. + // The unique ID of the detector associated with the IPSet. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The unique ID of the ipSet you want to delete. + // The unique ID of the IPSet to delete. // // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` @@ -88,7 +88,8 @@ const opDeleteIPSet = "DeleteIPSet" // DeleteIPSetRequest returns a request value for making API operation for // Amazon GuardDuty. // -// Deletes the IPSet specified by the IPSet ID. +// Deletes the IPSet specified by the ipSetId. IPSets are called Trusted IP +// lists in the console user interface. // // // Example sending a request using DeleteIPSetRequest. // req := client.DeleteIPSetRequest(params) diff --git a/service/guardduty/api_op_DeletePublishingDestination.go b/service/guardduty/api_op_DeletePublishingDestination.go new file mode 100644 index 00000000000..941e80614d3 --- /dev/null +++ b/service/guardduty/api_op_DeletePublishingDestination.go @@ -0,0 +1,153 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package guardduty + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeletePublishingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ID of the publishing destination to delete. + // + // DestinationId is a required field + DestinationId *string `location:"uri" locationName:"destinationId" type:"string" required:"true"` + + // The unique ID of the detector associated with the publishing destination + // to delete. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublishingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePublishingDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeletePublishingDestinationInput"} + + if s.DestinationId == nil { + invalidParams.Add(aws.NewErrParamRequired("DestinationId")) + } + + if s.DetectorId == nil { + invalidParams.Add(aws.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeletePublishingDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DetectorId != nil { + v := *s.DetectorId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeletePublishingDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublishingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeletePublishingDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeletePublishingDestination = "DeletePublishingDestination" + +// DeletePublishingDestinationRequest returns a request value for making API operation for +// Amazon GuardDuty. +// +// Deletes the publishing definition with the specified destinationId. +// +// // Example sending a request using DeletePublishingDestinationRequest. +// req := client.DeletePublishingDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeletePublishingDestination +func (c *Client) DeletePublishingDestinationRequest(input *DeletePublishingDestinationInput) DeletePublishingDestinationRequest { + op := &aws.Operation{ + Name: opDeletePublishingDestination, + HTTPMethod: "DELETE", + HTTPPath: "/detector/{detectorId}/publishingDestination/{destinationId}", + } + + if input == nil { + input = &DeletePublishingDestinationInput{} + } + + req := c.newRequest(op, input, &DeletePublishingDestinationOutput{}) + return DeletePublishingDestinationRequest{Request: req, Input: input, Copy: c.DeletePublishingDestinationRequest} +} + +// DeletePublishingDestinationRequest is the request type for the +// DeletePublishingDestination API operation. +type DeletePublishingDestinationRequest struct { + *aws.Request + Input *DeletePublishingDestinationInput + Copy func(*DeletePublishingDestinationInput) DeletePublishingDestinationRequest +} + +// Send marshals and sends the DeletePublishingDestination API request. 
+func (r DeletePublishingDestinationRequest) Send(ctx context.Context) (*DeletePublishingDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeletePublishingDestinationResponse{ + DeletePublishingDestinationOutput: r.Request.Data.(*DeletePublishingDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeletePublishingDestinationResponse is the response type for the +// DeletePublishingDestination API operation. +type DeletePublishingDestinationResponse struct { + *DeletePublishingDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeletePublishingDestination request. +func (r *DeletePublishingDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/guardduty/api_op_DescribePublishingDestination.go b/service/guardduty/api_op_DescribePublishingDestination.go new file mode 100644 index 00000000000..e67839be9d0 --- /dev/null +++ b/service/guardduty/api_op_DescribePublishingDestination.go @@ -0,0 +1,211 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package guardduty + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribePublishingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ID of the publishing destination to retrieve. + // + // DestinationId is a required field + DestinationId *string `location:"uri" locationName:"destinationId" type:"string" required:"true"` + + // The unique ID of the detector associated with the publishing destination + // to retrieve. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribePublishingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribePublishingDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribePublishingDestinationInput"} + + if s.DestinationId == nil { + invalidParams.Add(aws.NewErrParamRequired("DestinationId")) + } + + if s.DetectorId == nil { + invalidParams.Add(aws.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DescribePublishingDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DetectorId != nil { + v := *s.DetectorId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribePublishingDestinationOutput struct { + _ struct{} `type:"structure"` + + // The ID of the publishing destination. + // + // DestinationId is a required field + DestinationId *string `locationName:"destinationId" type:"string" required:"true"` + + // A DestinationProperties object that includes the DestinationArn and KmsKeyArn + // of the publishing destination. + // + // DestinationProperties is a required field + DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure" required:"true"` + + // The type of the publishing destination. Currently, only S3 is supported. + // + // DestinationType is a required field + DestinationType DestinationType `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"true"` + + // The time, in epoch millisecond format, at which GuardDuty was first unable + // to publish findings to the destination. + // + // PublishingFailureStartTimestamp is a required field + PublishingFailureStartTimestamp *int64 `locationName:"publishingFailureStartTimestamp" type:"long" required:"true"` + + // The status of the publishing destination. + // + // Status is a required field + Status PublishingStatus `locationName:"status" min:"1" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s DescribePublishingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribePublishingDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DestinationProperties != nil { + v := s.DestinationProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "destinationProperties", v, metadata) + } + if len(s.DestinationType) > 0 { + v := s.DestinationType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.PublishingFailureStartTimestamp != nil { + v := *s.PublishingFailureStartTimestamp + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "publishingFailureStartTimestamp", protocol.Int64Value(v), metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +const opDescribePublishingDestination = "DescribePublishingDestination" + +// DescribePublishingDestinationRequest returns a request value for making API operation for +// Amazon GuardDuty. 
+// +// Returns information about the publishing destination specified by the provided +// destinationId. +// +// // Example sending a request using DescribePublishingDestinationRequest. +// req := client.DescribePublishingDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribePublishingDestination +func (c *Client) DescribePublishingDestinationRequest(input *DescribePublishingDestinationInput) DescribePublishingDestinationRequest { + op := &aws.Operation{ + Name: opDescribePublishingDestination, + HTTPMethod: "GET", + HTTPPath: "/detector/{detectorId}/publishingDestination/{destinationId}", + } + + if input == nil { + input = &DescribePublishingDestinationInput{} + } + + req := c.newRequest(op, input, &DescribePublishingDestinationOutput{}) + return DescribePublishingDestinationRequest{Request: req, Input: input, Copy: c.DescribePublishingDestinationRequest} +} + +// DescribePublishingDestinationRequest is the request type for the +// DescribePublishingDestination API operation. +type DescribePublishingDestinationRequest struct { + *aws.Request + Input *DescribePublishingDestinationInput + Copy func(*DescribePublishingDestinationInput) DescribePublishingDestinationRequest +} + +// Send marshals and sends the DescribePublishingDestination API request. +func (r DescribePublishingDestinationRequest) Send(ctx context.Context) (*DescribePublishingDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribePublishingDestinationResponse{ + DescribePublishingDestinationOutput: r.Request.Data.(*DescribePublishingDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribePublishingDestinationResponse is the response type for the +// DescribePublishingDestination API operation. +type DescribePublishingDestinationResponse struct { + *DescribePublishingDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribePublishingDestination request. +func (r *DescribePublishingDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/guardduty/api_op_GetIPSet.go b/service/guardduty/api_op_GetIPSet.go index 63864c85b38..91d45940234 100644 --- a/service/guardduty/api_op_GetIPSet.go +++ b/service/guardduty/api_op_GetIPSet.go @@ -18,7 +18,7 @@ type GetIPSetInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The unique ID of the ipSet you want to get. + // The unique ID of the IPSet to retrieve. // // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` @@ -82,9 +82,7 @@ type GetIPSetOutput struct { // Location is a required field Location *string `locationName:"location" min:"1" type:"string" required:"true"` - // The user friendly name to identify the IPSet. This name is displayed in all - // findings that are triggered by activity that involves IP addresses included - // in this IPSet. + // The user friendly name for the IPSet. 
// // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -149,7 +147,7 @@ const opGetIPSet = "GetIPSet" // GetIPSetRequest returns a request value for making API operation for // Amazon GuardDuty. // -// Retrieves the IPSet specified by the IPSet ID. +// Retrieves the IPSet specified by the ipSetId. // // // Example sending a request using GetIPSetRequest. // req := client.GetIPSetRequest(params) diff --git a/service/guardduty/api_op_ListFindings.go b/service/guardduty/api_op_ListFindings.go index 9d4800b4cd6..f25f35d2ebd 100644 --- a/service/guardduty/api_op_ListFindings.go +++ b/service/guardduty/api_op_ListFindings.go @@ -19,7 +19,108 @@ type ListFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // Represents the criteria used for querying findings. + // Represents the criteria used for querying findings. Valid values include: + // + // * JSON field name + // + // * accountId + // + // * region + // + // * confidence + // + // * id + // + // * resource.accessKeyDetails.accessKeyId + // + // * resource.accessKeyDetails.principalId + // + // * resource.accessKeyDetails.userName + // + // * resource.accessKeyDetails.userType + // + // * resource.instanceDetails.iamInstanceProfile.id + // + // * resource.instanceDetails.imageId + // + // * resource.instanceDetails.instanceId + // + // * resource.instanceDetails.networkInterfaces.ipv6Addresses + // + // * resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress + // + // * resource.instanceDetails.networkInterfaces.publicDnsName + // + // * resource.instanceDetails.networkInterfaces.publicIp + // + // * resource.instanceDetails.networkInterfaces.securityGroups.groupId + // + // * resource.instanceDetails.networkInterfaces.securityGroups.groupName + // + // * resource.instanceDetails.networkInterfaces.subnetId + // + // * resource.instanceDetails.networkInterfaces.vpcId + // + // * resource.instanceDetails.tags.key + // + // * resource.instanceDetails.tags.value + // + // * resource.resourceType + // + // * service.action.actionType + // + // * service.action.awsApiCallAction.api + // + // * service.action.awsApiCallAction.callerType + // + // * service.action.awsApiCallAction.remoteIpDetails.city.cityName + // + // * service.action.awsApiCallAction.remoteIpDetails.country.countryName + // + // * service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 + // + // * service.action.awsApiCallAction.remoteIpDetails.organization.asn + // + // * service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg + // + // * service.action.awsApiCallAction.serviceName + // + // * service.action.dnsRequestAction.domain + // + // * service.action.networkConnectionAction.blocked + // + // * service.action.networkConnectionAction.connectionDirection + // + // * service.action.networkConnectionAction.localPortDetails.port + // + // * service.action.networkConnectionAction.protocol + // + // * service.action.networkConnectionAction.remoteIpDetails.city.cityName + // + // * service.action.networkConnectionAction.remoteIpDetails.country.countryName + // + // * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 + // + // * service.action.networkConnectionAction.remoteIpDetails.organization.asn + // + // * service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg + // + // * service.action.networkConnectionAction.remotePortDetails.port + // + // 
* service.additionalInfo.threatListName
+ //
+ // * service.archived When this attribute is set to 'true', only archived
+ // findings are listed. When it's set to 'false', only unarchived findings
+ // are listed. When this attribute is not set, all existing findings are
+ // listed.
+ //
+ // * service.resourceRole
+ //
+ // * severity
+ //
+ // * type
+ //
+ // * updatedAt Type: Timestamp in Unix Epoch millisecond format: 1486685375000
 FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure"`

 // You can use this parameter to indicate the maximum number of items you want
diff --git a/service/guardduty/api_op_ListIPSets.go b/service/guardduty/api_op_ListIPSets.go
index 2d8b50ccc80..15a4a30cf73 100644
--- a/service/guardduty/api_op_ListIPSets.go
+++ b/service/guardduty/api_op_ListIPSets.go
@@ -125,7 +125,9 @@ const opListIPSets = "ListIPSets"
 // ListIPSetsRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Lists the IPSets of the GuardDuty service specified by the detector ID.
+// Lists the IPSets of the GuardDuty service specified by the detector ID. If
+// you use this operation from a member account, the IPSets returned are the
+// IPSets from the associated master account.
 //
 // // Example sending a request using ListIPSetsRequest.
 // req := client.ListIPSetsRequest(params)
diff --git a/service/guardduty/api_op_ListPublishingDestinations.go b/service/guardduty/api_op_ListPublishingDestinations.go
new file mode 100644
index 00000000000..7e8af3f9c55
--- /dev/null
+++ b/service/guardduty/api_op_ListPublishingDestinations.go
@@ -0,0 +1,244 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package guardduty
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+type ListPublishingDestinationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the detector to retrieve publishing destinations for.
+ //
+ // DetectorId is a required field
+ DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"`
+
+ // The maximum number of results to return in the response.
+ MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"`
+
+ // A token to use for paginating results returned in the response. Set the value
+ // of this parameter to null for the first request to a list action. For subsequent
+ // calls, use the NextToken value returned from the previous request to continue
+ // listing results after the first page.
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListPublishingDestinationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
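// Illustrative usage sketch (hand-written, not generated code): building the FindingCriteria
// documented above for ListFindings, filtering on the "severity" and "service.archived"
// criterion names. It assumes FindingCriteria exposes a Criterion map keyed by field name;
// the Condition fields used here (Eq, GreaterThanOrEqual) appear later in this diff, and the
// detector ID is a placeholder.
//
//     func listHighSeverityFindings(ctx context.Context, client *guardduty.Client) ([]string, error) {
//         req := client.ListFindingsRequest(&guardduty.ListFindingsInput{
//             DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"), // placeholder
//             FindingCriteria: &guardduty.FindingCriteria{
//                 Criterion: map[string]guardduty.Condition{
//                     "severity":         {GreaterThanOrEqual: aws.Int64(7)},
//                     "service.archived": {Eq: []string{"false"}},
//                 },
//             },
//         })
//         resp, err := req.Send(ctx)
//         if err != nil {
//             return nil, err
//         }
//         return resp.FindingIds, nil
//     }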
+func (s *ListPublishingDestinationsInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListPublishingDestinationsInput"}
+
+ if s.DetectorId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("DetectorId"))
+ }
+ if s.DetectorId != nil && len(*s.DetectorId) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s ListPublishingDestinationsInput) MarshalFields(e protocol.FieldEncoder) error {
+ e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+ if s.DetectorId != nil {
+ v := *s.DetectorId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.MaxResults != nil {
+ v := *s.MaxResults
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata)
+ }
+ if s.NextToken != nil {
+ v := *s.NextToken
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+type ListPublishingDestinationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A Destinations object that includes information about each publishing destination
+ // returned.
+ //
+ // Destinations is a required field
+ Destinations []Destination `locationName:"destinations" type:"list" required:"true"`
+
+ // A token to use for paginating results returned in the response. Set the value
+ // of this parameter to null for the first request to a list action. For subsequent
+ // calls, use the NextToken value returned from the previous request to continue
+ // listing results after the first page.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListPublishingDestinationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s ListPublishingDestinationsOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.Destinations != nil {
+ v := s.Destinations
+
+ metadata := protocol.Metadata{}
+ ls0 := e.List(protocol.BodyTarget, "destinations", metadata)
+ ls0.Start()
+ for _, v1 := range v {
+ ls0.ListAddFields(v1)
+ }
+ ls0.End()
+
+ }
+ if s.NextToken != nil {
+ v := *s.NextToken
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+const opListPublishingDestinations = "ListPublishingDestinations"
+
+// ListPublishingDestinationsRequest returns a request value for making API operation for
+// Amazon GuardDuty.
+//
+// Returns a list of publishing destinations associated with the specified detectorId.
+//
+// // Example sending a request using ListPublishingDestinationsRequest.
+// req := client.ListPublishingDestinationsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListPublishingDestinations +func (c *Client) ListPublishingDestinationsRequest(input *ListPublishingDestinationsInput) ListPublishingDestinationsRequest { + op := &aws.Operation{ + Name: opListPublishingDestinations, + HTTPMethod: "GET", + HTTPPath: "/detector/{detectorId}/publishingDestination", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPublishingDestinationsInput{} + } + + req := c.newRequest(op, input, &ListPublishingDestinationsOutput{}) + return ListPublishingDestinationsRequest{Request: req, Input: input, Copy: c.ListPublishingDestinationsRequest} +} + +// ListPublishingDestinationsRequest is the request type for the +// ListPublishingDestinations API operation. +type ListPublishingDestinationsRequest struct { + *aws.Request + Input *ListPublishingDestinationsInput + Copy func(*ListPublishingDestinationsInput) ListPublishingDestinationsRequest +} + +// Send marshals and sends the ListPublishingDestinations API request. +func (r ListPublishingDestinationsRequest) Send(ctx context.Context) (*ListPublishingDestinationsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListPublishingDestinationsResponse{ + ListPublishingDestinationsOutput: r.Request.Data.(*ListPublishingDestinationsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListPublishingDestinationsRequestPaginator returns a paginator for ListPublishingDestinations. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListPublishingDestinationsRequest(input) +// p := guardduty.NewListPublishingDestinationsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListPublishingDestinationsPaginator(req ListPublishingDestinationsRequest) ListPublishingDestinationsPaginator { + return ListPublishingDestinationsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListPublishingDestinationsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListPublishingDestinationsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListPublishingDestinationsPaginator struct { + aws.Pager +} + +func (p *ListPublishingDestinationsPaginator) CurrentPage() *ListPublishingDestinationsOutput { + return p.Pager.CurrentPage().(*ListPublishingDestinationsOutput) +} + +// ListPublishingDestinationsResponse is the response type for the +// ListPublishingDestinations API operation. 
+type ListPublishingDestinationsResponse struct { + *ListPublishingDestinationsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListPublishingDestinations request. +func (r *ListPublishingDestinationsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/guardduty/api_op_ListThreatIntelSets.go b/service/guardduty/api_op_ListThreatIntelSets.go index 108214c5b80..d4954a0cc40 100644 --- a/service/guardduty/api_op_ListThreatIntelSets.go +++ b/service/guardduty/api_op_ListThreatIntelSets.go @@ -22,10 +22,10 @@ type ListThreatIntelSetsInput struct { // in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // You can use this parameter when paginating results. Set the value of this - // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // You can use this parameter to paginate results in the response. Set the value + // of this parameter to null on your first call to the list action. For subsequent + // calls to the action fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -126,7 +126,8 @@ const opListThreatIntelSets = "ListThreatIntelSets" // Amazon GuardDuty. // // Lists the ThreatIntelSets of the GuardDuty service specified by the detector -// ID. +// ID. If you use this operation from a member account, the ThreatIntelSets +// associated with the master account are returned. // // // Example sending a request using ListThreatIntelSetsRequest. // req := client.ListThreatIntelSetsRequest(params) diff --git a/service/guardduty/api_op_StartMonitoringMembers.go b/service/guardduty/api_op_StartMonitoringMembers.go index 7c1665b9601..bb0cb320bec 100644 --- a/service/guardduty/api_op_StartMonitoringMembers.go +++ b/service/guardduty/api_op_StartMonitoringMembers.go @@ -13,14 +13,13 @@ import ( type StartMonitoringMembersInput struct { _ struct{} `type:"structure"` - // A list of account IDs of the GuardDuty member accounts whose findings you - // want the master account to monitor. + // A list of account IDs of the GuardDuty member accounts to start monitoring. // // AccountIds is a required field AccountIds []string `locationName:"accountIds" min:"1" type:"list" required:"true"` - // The unique ID of the detector of the GuardDuty account whom you want to re-enable - // to monitor members' findings. + // The unique ID of the detector of the GuardDuty master account associated + // with the member accounts to monitor. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -117,9 +116,9 @@ const opStartMonitoringMembers = "StartMonitoringMembers" // StartMonitoringMembersRequest returns a request value for making API operation for // Amazon GuardDuty. // -// Re-enables GuardDuty to monitor findings of the member accounts specified -// by the account IDs. A master GuardDuty account can run this command after -// disabling GuardDuty from monitoring these members' findings by running StopMonitoringMembers. +// Turns on GuardDuty monitoring of the specified member accounts. 
Use this
+// operation to restart monitoring of accounts that you stopped monitoring with
+// the StopMonitoringMembers operation.
//
// // Example sending a request using StartMonitoringMembersRequest.
// req := client.StartMonitoringMembersRequest(params)
diff --git a/service/guardduty/api_op_StopMonitoringMembers.go b/service/guardduty/api_op_StopMonitoringMembers.go
index 8f22fdb4ce4..4de94a1e695 100644
--- a/service/guardduty/api_op_StopMonitoringMembers.go
+++ b/service/guardduty/api_op_StopMonitoringMembers.go
@@ -117,10 +117,8 @@ const opStopMonitoringMembers = "StopMonitoringMembers"
 // StopMonitoringMembersRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Disables GuardDuty from monitoring findings of the member accounts specified
-// by the account IDs. After running this command, a master GuardDuty account
-// can run StartMonitoringMembers to re-enable GuardDuty to monitor these members’
-// findings.
+// Stops GuardDuty monitoring for the specified member accounts. Use the StartMonitoringMembers
+// operation to restart monitoring for those accounts.
 //
 // // Example sending a request using StopMonitoringMembersRequest.
 // req := client.StopMonitoringMembersRequest(params)
diff --git a/service/guardduty/api_op_TagResource.go b/service/guardduty/api_op_TagResource.go
index c24ac9d9ec5..4142bd1ed65 100644
--- a/service/guardduty/api_op_TagResource.go
+++ b/service/guardduty/api_op_TagResource.go
@@ -13,7 +13,8 @@ import (
 type TagResourceInput struct {
 _ struct{} `type:"structure"`

- // The Amazon Resource Name (ARN) for the given GuardDuty resource
+ // The Amazon Resource Name (ARN) for the GuardDuty resource to apply a tag
+ // to.
 //
 // ResourceArn is a required field
 ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`
diff --git a/service/guardduty/api_op_UnarchiveFindings.go b/service/guardduty/api_op_UnarchiveFindings.go
index 5f010d6983a..1eb9b31b796 100644
--- a/service/guardduty/api_op_UnarchiveFindings.go
+++ b/service/guardduty/api_op_UnarchiveFindings.go
@@ -13,13 +13,12 @@ import (
 type UnarchiveFindingsInput struct {
 _ struct{} `type:"structure"`

- // The ID of the detector that specifies the GuardDuty service whose findings
- // you want to unarchive.
+ // The ID of the detector associated with the findings to unarchive.
 //
 // DetectorId is a required field
 DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"`

- // IDs of the findings that you want to unarchive.
+ // IDs of the findings to unarchive.
 //
 // FindingIds is a required field
 FindingIds []string `locationName:"findingIds" type:"list" required:"true"`
@@ -95,7 +94,7 @@ const opUnarchiveFindings = "UnarchiveFindings"
 // UnarchiveFindingsRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Unarchives Amazon GuardDuty findings specified by the list of finding IDs.
+// Unarchives GuardDuty findings specified by the findingIds.
 //
 // // Example sending a request using UnarchiveFindingsRequest.
// req := client.UnarchiveFindingsRequest(params)
diff --git a/service/guardduty/api_op_UntagResource.go b/service/guardduty/api_op_UntagResource.go
index e5d0fb81814..8b76a7d661c 100644
--- a/service/guardduty/api_op_UntagResource.go
+++ b/service/guardduty/api_op_UntagResource.go
@@ -13,12 +13,12 @@ import (
 type UntagResourceInput struct {
 _ struct{} `type:"structure"`

- // The Amazon Resource Name (ARN) for the given GuardDuty resource
+ // The Amazon Resource Name (ARN) for the resource to remove tags from.
 //
 // ResourceArn is a required field
 ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"`

- // The tag keys to remove from a resource.
+ // The tag keys to remove from the resource.
 //
 // TagKeys is a required field
 TagKeys []string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"`
diff --git a/service/guardduty/api_op_UpdateDetector.go b/service/guardduty/api_op_UpdateDetector.go
index 0c31b2792d7..ae1d7a8871d 100644
--- a/service/guardduty/api_op_UpdateDetector.go
+++ b/service/guardduty/api_op_UpdateDetector.go
@@ -13,16 +13,16 @@ import (
 type UpdateDetectorInput struct {
 _ struct{} `type:"structure"`

- // The unique ID of the detector that you want to update.
+ // The unique ID of the detector to update.
 //
 // DetectorId is a required field
 DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"`

- // Updated boolean value for the detector that specifies whether the detector
- // is enabled.
+ // Specifies whether the detector is enabled or not enabled.
 Enable *bool `locationName:"enable" type:"boolean"`

- // A enum value that specifies how frequently customer got Finding updates published.
+ // An enum value that specifies how frequently findings are exported, such as
+ // to CloudWatch Events.
 FindingPublishingFrequency FindingPublishingFrequency `locationName:"findingPublishingFrequency" type:"string" enum:"true"`
 }

@@ -92,7 +92,7 @@ const opUpdateDetector = "UpdateDetector"
 // UpdateDetectorRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Updates an Amazon GuardDuty detector specified by the detectorId.
+// Updates the Amazon GuardDuty detector specified by the detectorId.
 //
 // // Example sending a request using UpdateDetectorRequest.
 // req := client.UpdateDetectorRequest(params)
diff --git a/service/guardduty/api_op_UpdateFindingsFeedback.go b/service/guardduty/api_op_UpdateFindingsFeedback.go
index fc58ce9f02c..33785e9a1e5 100644
--- a/service/guardduty/api_op_UpdateFindingsFeedback.go
+++ b/service/guardduty/api_op_UpdateFindingsFeedback.go
@@ -16,13 +16,12 @@ type UpdateFindingsFeedbackInput struct {
 // Additional feedback about the GuardDuty findings.
 Comments *string `locationName:"comments" type:"string"`

- // The ID of the detector that specifies the GuardDuty service whose findings
- // you want to mark as useful or not useful.
+ // The ID of the detector associated with the findings to update feedback for.
 //
 // DetectorId is a required field
 DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"`

- // Valid values: USEFUL | NOT_USEFUL
+ // The feedback for the finding.
//
// Feedback is a required field
Feedback Feedback `locationName:"feedback" type:"string" required:"true" enum:"true"`
@@ -118,7 +117,7 @@ const opUpdateFindingsFeedback = "UpdateFindingsFeedback"
 // UpdateFindingsFeedbackRequest returns a request value for making API operation for
 // Amazon GuardDuty.
 //
-// Marks specified Amazon GuardDuty findings as useful or not useful.
+// Marks the specified GuardDuty findings as useful or not useful.
 //
 // // Example sending a request using UpdateFindingsFeedbackRequest.
 // req := client.UpdateFindingsFeedbackRequest(params)
diff --git a/service/guardduty/api_op_UpdatePublishingDestination.go b/service/guardduty/api_op_UpdatePublishingDestination.go
new file mode 100644
index 00000000000..3dd45b5b105
--- /dev/null
+++ b/service/guardduty/api_op_UpdatePublishingDestination.go
@@ -0,0 +1,162 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package guardduty
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+type UpdatePublishingDestinationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the publishing destination to update.
+ //
+ // DestinationId is a required field
+ DestinationId *string `location:"uri" locationName:"destinationId" type:"string" required:"true"`
+
+ // A DestinationProperties object that includes the DestinationArn and KmsKeyArn
+ // of the publishing destination.
+ DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure"`
+
+ // The ID of the detector associated with the publishing destination to update.
+ //
+ // DetectorId is a required field
+ DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdatePublishingDestinationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdatePublishingDestinationInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "UpdatePublishingDestinationInput"}
+
+ if s.DestinationId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("DestinationId"))
+ }
+
+ if s.DetectorId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("DetectorId"))
+ }
+ if s.DetectorId != nil && len(*s.DetectorId) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("DetectorId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s UpdatePublishingDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DestinationProperties != nil { + v := s.DestinationProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "destinationProperties", v, metadata) + } + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DetectorId != nil { + v := *s.DetectorId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "detectorId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdatePublishingDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdatePublishingDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdatePublishingDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUpdatePublishingDestination = "UpdatePublishingDestination" + +// UpdatePublishingDestinationRequest returns a request value for making API operation for +// Amazon GuardDuty. +// +// Updates information about the publishing destination specified by the destinationId. +// +// // Example sending a request using UpdatePublishingDestinationRequest. +// req := client.UpdatePublishingDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination +func (c *Client) UpdatePublishingDestinationRequest(input *UpdatePublishingDestinationInput) UpdatePublishingDestinationRequest { + op := &aws.Operation{ + Name: opUpdatePublishingDestination, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/publishingDestination/{destinationId}", + } + + if input == nil { + input = &UpdatePublishingDestinationInput{} + } + + req := c.newRequest(op, input, &UpdatePublishingDestinationOutput{}) + return UpdatePublishingDestinationRequest{Request: req, Input: input, Copy: c.UpdatePublishingDestinationRequest} +} + +// UpdatePublishingDestinationRequest is the request type for the +// UpdatePublishingDestination API operation. +type UpdatePublishingDestinationRequest struct { + *aws.Request + Input *UpdatePublishingDestinationInput + Copy func(*UpdatePublishingDestinationInput) UpdatePublishingDestinationRequest +} + +// Send marshals and sends the UpdatePublishingDestination API request. +func (r UpdatePublishingDestinationRequest) Send(ctx context.Context) (*UpdatePublishingDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdatePublishingDestinationResponse{ + UpdatePublishingDestinationOutput: r.Request.Data.(*UpdatePublishingDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdatePublishingDestinationResponse is the response type for the +// UpdatePublishingDestination API operation. 
+type UpdatePublishingDestinationResponse struct { + *UpdatePublishingDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdatePublishingDestination request. +func (r *UpdatePublishingDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/guardduty/api_types.go b/service/guardduty/api_types.go index 6981e1ab815..efd58cc0004 100644 --- a/service/guardduty/api_types.go +++ b/service/guardduty/api_types.go @@ -271,8 +271,8 @@ func (s City) MarshalFields(e protocol.FieldEncoder) error { type Condition struct { _ struct{} `type:"structure"` - // Deprecated. Represents the equal condition to be applied to a single field - // when querying for findings. + // Represents the equal condition to be applied to a single field when querying + // for findings. Eq []string `locationName:"eq" deprecated:"true" type:"list"` // Represents an equal condition to be applied to a single field when querying @@ -287,12 +287,12 @@ type Condition struct { // when querying for findings. GreaterThanOrEqual *int64 `locationName:"greaterThanOrEqual" type:"long"` - // Deprecated. Represents a greater than condition to be applied to a single - // field when querying for findings. + // Represents a greater than condition to be applied to a single field when + // querying for findings. Gt *int64 `locationName:"gt" deprecated:"true" type:"integer"` - // Deprecated. Represents a greater than equal condition to be applied to a - // single field when querying for findings. + // Represents a greater than equal condition to be applied to a single field + // when querying for findings. Gte *int64 `locationName:"gte" deprecated:"true" type:"integer"` // Represents a less than condition to be applied to a single field when querying @@ -303,16 +303,16 @@ type Condition struct { // querying for findings. LessThanOrEqual *int64 `locationName:"lessThanOrEqual" type:"long"` - // Deprecated. Represents a less than condition to be applied to a single field - // when querying for findings. + // Represents a less than condition to be applied to a single field when querying + // for findings. Lt *int64 `locationName:"lt" deprecated:"true" type:"integer"` - // Deprecated. Represents a less than equal condition to be applied to a single - // field when querying for findings. + // Represents a less than equal condition to be applied to a single field when + // querying for findings. Lte *int64 `locationName:"lte" deprecated:"true" type:"integer"` - // Deprecated. Represents the not equal condition to be applied to a single - // field when querying for findings. + // Represents the not equal condition to be applied to a single field when querying + // for findings. Neq []string `locationName:"neq" deprecated:"true" type:"list"` // Represents an not equal condition to be applied to a single field when querying @@ -426,7 +426,8 @@ func (s Condition) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the country. +// Contains information about the country in which the remote IP address is +// located. type Country struct { _ struct{} `type:"structure"` @@ -459,11 +460,95 @@ func (s Country) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the DNS request. +// Contains information about a publishing destination, including the ID, type, +// and status. +type Destination struct { + _ struct{} `type:"structure"` + + // The unique ID of the publishing destination. 
+ // + // DestinationId is a required field + DestinationId *string `locationName:"destinationId" type:"string" required:"true"` + + // The type of resource used for the publishing destination. Currently, only + // S3 is supported. + // + // DestinationType is a required field + DestinationType DestinationType `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"true"` + + // The status of the publishing destination. + // + // Status is a required field + Status PublishingStatus `locationName:"status" min:"1" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Destination) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationId != nil { + v := *s.DestinationId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.DestinationType) > 0 { + v := s.DestinationType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Contains the ARN of the resource to publish to, such as an S3 bucket, and +// the ARN of the KMS key to use to encrypt published findings. +type DestinationProperties struct { + _ struct{} `type:"structure"` + + // The ARN of the resource to publish to. + DestinationArn *string `locationName:"destinationArn" type:"string"` + + // The ARN of the KMS key to use for encryption. + KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` +} + +// String returns the string representation +func (s DestinationProperties) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DestinationProperties) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationArn != nil { + v := *s.DestinationArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "destinationArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.KmsKeyArn != nil { + v := *s.KmsKeyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "kmsKeyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Contains information about the DNS_REQUEST action described in this finding. type DnsRequestAction struct { _ struct{} `type:"structure"` - // Domain information for the DNS request. + // Domain information for the API request. Domain *string `locationName:"domain" type:"string"` } @@ -537,7 +622,8 @@ func (s Evidence) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the finding. +// Contains information about the finding, which is generated when abnormal +// or suspicious activity is detected. type Finding struct { _ struct{} `type:"structure"` @@ -575,7 +661,8 @@ type Finding struct { // Region is a required field Region *string `locationName:"region" type:"string" required:"true"` - // Contains information about the resource. 
+ // Contains information about the AWS resource associated with the activity + // that prompted GuardDuty to generate a finding. // // Resource is a required field Resource *Resource `locationName:"resource" type:"structure" required:"true"` @@ -585,7 +672,7 @@ type Finding struct { // SchemaVersion is a required field SchemaVersion *string `locationName:"schemaVersion" type:"string" required:"true"` - // Contains information about the service. + // Contains additional information about the generated finding. Service *Service `locationName:"service" type:"structure"` // The severity of the finding. @@ -707,7 +794,7 @@ func (s Finding) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains finding criteria information. +// Contains information about the criteria used for querying findings. type FindingCriteria struct { _ struct{} `type:"structure"` @@ -768,7 +855,7 @@ func (s FindingStatistics) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the +// Contains information about the location of the remote IP address. type GeoLocation struct { _ struct{} `type:"structure"` @@ -801,7 +888,7 @@ func (s GeoLocation) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the instance profile. +// Contains information about the EC2 instance profile. type IamInstanceProfile struct { _ struct{} `type:"structure"` @@ -975,17 +1062,18 @@ func (s InstanceDetails) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the invitation. +// Contains information about the invitation to become a member account. type Invitation struct { _ struct{} `type:"structure"` - // Inviter account ID + // The ID of the account from which the invitation was sent. AccountId *string `locationName:"accountId" min:"12" type:"string"` - // This value is used to validate the inviter account to the member account. + // The ID of the invitation. This value is used to validate the inviter account + // to the member account. InvitationId *string `locationName:"invitationId" type:"string"` - // Timestamp at which the invitation was sent + // Timestamp at which the invitation was sent. InvitedAt *string `locationName:"invitedAt" type:"string"` // The status of the relationship between the inviter and invitee accounts. @@ -1198,7 +1286,8 @@ func (s Member) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the network connection. +// Contains information about the NETWORK_CONNECTION action described in the +// finding. type NetworkConnectionAction struct { _ struct{} `type:"structure"` @@ -1267,7 +1356,7 @@ func (s NetworkConnectionAction) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the network interface. +// Contains information about the network interface of the EC2 instance. type NetworkInterface struct { _ struct{} `type:"structure"` @@ -1390,7 +1479,7 @@ func (s NetworkInterface) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Continas information about the organization. +// Contains information about the ISP organization of the remote IP address. type Organization struct { _ struct{} `type:"structure"` @@ -1441,7 +1530,7 @@ func (s Organization) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the port probe. +// Contains information about the PORT_PROBE action described in the finding.
type PortProbeAction struct { _ struct{} `type:"structure"` @@ -1513,7 +1602,7 @@ func (s PortProbeDetail) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the private IP address. +// Contains other private IP address information of the EC2 instance. type PrivateIpAddressDetails struct { _ struct{} `type:"structure"` @@ -1546,7 +1635,7 @@ func (s PrivateIpAddressDetails) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the product code. +// Contains information about the product code for the EC2 instance. type ProductCode struct { _ struct{} `type:"structure"` @@ -1579,7 +1668,7 @@ func (s ProductCode) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Continas information about the remote IP address. +// Contains information about the remote IP address of the connection. type RemoteIpDetails struct { _ struct{} `type:"structure"` @@ -1672,7 +1761,8 @@ func (s RemotePortDetails) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the resource. +// Contains information about the AWS resource associated with the activity +// that prompted GuardDuty to generate a finding. type Resource struct { _ struct{} `type:"structure"` @@ -1716,7 +1806,7 @@ func (s Resource) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the security group. +// Contains information about the security groups associated with the EC2 instance. type SecurityGroup struct { _ struct{} `type:"structure"` @@ -1749,7 +1839,7 @@ func (s SecurityGroup) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the service. +// Contains additional information about the generated finding. type Service struct { _ struct{} `type:"structure"` @@ -1856,7 +1946,7 @@ func (s Service) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the criteria for sorting. +// Contains information about the criteria used for sorting findings. type SortCriteria struct { _ struct{} `type:"structure"` @@ -1890,7 +1980,7 @@ func (s SortCriteria) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Contains information about the tag associated with the resource. +// Contains information about a tag associated with the EC2 instance.
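// A minimal hand-written sketch, not generated code, showing how the finding types
// documented above might be read back with GetFindings. It assumes a configured
// *Client named client plus the "context", "fmt", and
// "github.com/aws/aws-sdk-go-v2/aws" imports; the detector and finding IDs are
// placeholders, and only fields described above are accessed.
func exampleInspectFindings(ctx context.Context, client *Client) error {
	resp, err := client.GetFindingsRequest(&GetFindingsInput{
		DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"),
		FindingIds: []string{"22b1c2d3e4f5a6b7c8d9e0f1a2b3c4d5"},
	}).Send(ctx)
	if err != nil {
		return err
	}
	for _, finding := range resp.Findings {
		// Resource and Service on each finding carry the details described above.
		fmt.Printf("finding in %s with severity %.1f\n",
			aws.StringValue(finding.Region), aws.Float64Value(finding.Severity))
	}
	return nil
}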
type Tag struct { _ struct{} `type:"structure"` diff --git a/service/guardduty/guarddutyiface/interface.go b/service/guardduty/guarddutyiface/interface.go index 44cd4ec9584..6daef39c0dc 100644 --- a/service/guardduty/guarddutyiface/interface.go +++ b/service/guardduty/guarddutyiface/interface.go @@ -73,6 +73,8 @@ type ClientAPI interface { CreateMembersRequest(*guardduty.CreateMembersInput) guardduty.CreateMembersRequest + CreatePublishingDestinationRequest(*guardduty.CreatePublishingDestinationInput) guardduty.CreatePublishingDestinationRequest + CreateSampleFindingsRequest(*guardduty.CreateSampleFindingsInput) guardduty.CreateSampleFindingsRequest CreateThreatIntelSetRequest(*guardduty.CreateThreatIntelSetInput) guardduty.CreateThreatIntelSetRequest @@ -89,8 +91,12 @@ type ClientAPI interface { DeleteMembersRequest(*guardduty.DeleteMembersInput) guardduty.DeleteMembersRequest + DeletePublishingDestinationRequest(*guardduty.DeletePublishingDestinationInput) guardduty.DeletePublishingDestinationRequest + DeleteThreatIntelSetRequest(*guardduty.DeleteThreatIntelSetInput) guardduty.DeleteThreatIntelSetRequest + DescribePublishingDestinationRequest(*guardduty.DescribePublishingDestinationInput) guardduty.DescribePublishingDestinationRequest + DisassociateFromMasterAccountRequest(*guardduty.DisassociateFromMasterAccountInput) guardduty.DisassociateFromMasterAccountRequest DisassociateMembersRequest(*guardduty.DisassociateMembersInput) guardduty.DisassociateMembersRequest @@ -127,6 +133,8 @@ type ClientAPI interface { ListMembersRequest(*guardduty.ListMembersInput) guardduty.ListMembersRequest + ListPublishingDestinationsRequest(*guardduty.ListPublishingDestinationsInput) guardduty.ListPublishingDestinationsRequest + ListTagsForResourceRequest(*guardduty.ListTagsForResourceInput) guardduty.ListTagsForResourceRequest ListThreatIntelSetsRequest(*guardduty.ListThreatIntelSetsInput) guardduty.ListThreatIntelSetsRequest @@ -149,6 +157,8 @@ type ClientAPI interface { UpdateIPSetRequest(*guardduty.UpdateIPSetInput) guardduty.UpdateIPSetRequest + UpdatePublishingDestinationRequest(*guardduty.UpdatePublishingDestinationInput) guardduty.UpdatePublishingDestinationRequest + UpdateThreatIntelSetRequest(*guardduty.UpdateThreatIntelSetInput) guardduty.UpdateThreatIntelSetRequest } diff --git a/service/iam/api_types.go b/service/iam/api_types.go index be79a37a471..ec981cb26b6 100644 --- a/service/iam/api_types.go +++ b/service/iam/api_types.go @@ -232,7 +232,7 @@ func (s AttachedPermissionsBoundary) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type AttachedPolicy struct { _ struct{} `type:"structure"` @@ -372,7 +372,7 @@ type EntityInfo struct { // The path to the entity (user or role). For more information about paths, // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` // The type of entity (user or role). @@ -483,7 +483,7 @@ type Group struct { // The Amazon Resource Name (ARN) specifying the group. For more information // about ARNs and how to use them in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. 
// // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -496,7 +496,7 @@ type Group struct { // The stable and unique string identifying the group. For more information // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // GroupId is a required field GroupId *string `min:"16" type:"string" required:"true"` @@ -508,7 +508,7 @@ type Group struct { // The path to the group. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -542,7 +542,7 @@ type GroupDetail struct { // The stable and unique string identifying the group. For more information // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. GroupId *string `min:"16" type:"string"` // The friendly name that identifies the group. @@ -553,7 +553,7 @@ type GroupDetail struct { // The path to the group. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` } @@ -579,7 +579,7 @@ type InstanceProfile struct { // The Amazon Resource Name (ARN) specifying the instance profile. For more // information about ARNs and how to use them in policies, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -591,7 +591,7 @@ type InstanceProfile struct { // The stable and unique string identifying the instance profile. For more information // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // InstanceProfileId is a required field InstanceProfileId *string `min:"16" type:"string" required:"true"` @@ -603,7 +603,7 @@ type InstanceProfile struct { // The path to the instance profile. For more information about paths, see IAM // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -712,7 +712,7 @@ func (s MFADevice) String() string { // // For more information about managed policies, see Managed Policies and Inline // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type ManagedPolicyDetail struct { _ struct{} `type:"structure"` @@ -736,7 +736,7 @@ type ManagedPolicyDetail struct { // // For more information about policy versions, see Versioning for Managed Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) - // in the Using IAM guide. + // in the IAM User Guide. DefaultVersionId *string `type:"string"` // A friendly description of the policy. @@ -748,7 +748,7 @@ type ManagedPolicyDetail struct { // The path to the policy. 
// // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` // The number of entities (users and roles) for which the policy is used as @@ -762,7 +762,7 @@ type ManagedPolicyDetail struct { // The stable and unique string identifying the policy. // // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. PolicyId *string `min:"16" type:"string"` // The friendly name (not ARN) identifying the policy. @@ -872,7 +872,7 @@ func (s PasswordPolicy) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type Policy struct { _ struct{} `type:"structure"` @@ -906,7 +906,7 @@ type Policy struct { // The path to the policy. // // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` // The number of entities (users and roles) for which the policy is used to @@ -920,7 +920,7 @@ type Policy struct { // The stable and unique string identifying the policy. // // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. PolicyId *string `min:"16" type:"string"` // The friendly name (not ARN) identifying the policy. @@ -1015,7 +1015,7 @@ func (s PolicyGrantingServiceAccess) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type PolicyGroup struct { _ struct{} `type:"structure"` @@ -1040,7 +1040,7 @@ func (s PolicyGroup) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type PolicyRole struct { _ struct{} `type:"structure"` @@ -1065,7 +1065,7 @@ func (s PolicyRole) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type PolicyUser struct { _ struct{} `type:"structure"` @@ -1091,7 +1091,7 @@ func (s PolicyUser) String() string { // // For more information about managed policies, refer to Managed Policies and // Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) -// in the Using IAM guide. +// in the IAM User Guide. type PolicyVersion struct { _ struct{} `type:"structure"` @@ -1226,7 +1226,7 @@ type Role struct { // The path to the role. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. 
// // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -1240,11 +1240,20 @@ type Role struct { // The stable and unique string identifying the role. For more information about // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // RoleId is a required field RoleId *string `min:"16" type:"string" required:"true"` + // Contains information about the last time that an IAM role was used. This + // includes the date and time and the Region in which the role was last used. + // Activity is only reported for the trailing 400 days. This period can be shorter + // if your Region began supporting these features within the last year. The + // role might have been used more than 400 days ago. For more information, see + // Regions Where Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) + // in the IAM User Guide. + RoleLastUsed *RoleLastUsed `type:"structure"` + // The friendly name that identifies the role. // // RoleName is a required field @@ -1291,7 +1300,7 @@ type RoleDetail struct { // The path to the role. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` // The ARN of the policy used to set the permissions boundary for the role. @@ -1303,9 +1312,18 @@ type RoleDetail struct { // The stable and unique string identifying the role. For more information about // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. RoleId *string `min:"16" type:"string"` + // Contains information about the last time that an IAM role was used. This + // includes the date and time and the Region in which the role was last used. + // Activity is only reported for the trailing 400 days. This period can be shorter + // if your Region began supporting these features within the last year. The + // role might have been used more than 400 days ago. For more information, see + // Regions Where Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) + // in the IAM User Guide. + RoleLastUsed *RoleLastUsed `type:"structure"` + // The friendly name that identifies the role. RoleName *string `min:"1" type:"string"` @@ -1324,6 +1342,37 @@ func (s RoleDetail) String() string { return awsutil.Prettify(s) } +// Contains information about the last time that an IAM role was used. This +// includes the date and time and the Region in which the role was last used. +// Activity is only reported for the trailing 400 days. This period can be shorter +// if your Region began supporting these features within the last year. The +// role might have been used more than 400 days ago. For more information, see +// Regions Where Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) +// in the IAM User Guide. +// +// This data type is returned as a response element in the GetRole and GetAccountAuthorizationDetails +// operations. 
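// A minimal hand-written sketch, not generated code, of reading this new
// information back from GetRole. It assumes a configured *Client named client plus
// the "context", "fmt", and "github.com/aws/aws-sdk-go-v2/aws" imports; the role
// name is a placeholder, and RoleLastUsed can be nil when no activity was recorded
// during the tracking period.
func exampleRoleLastUsed(ctx context.Context, client *Client) error {
	resp, err := client.GetRoleRequest(&GetRoleInput{
		RoleName: aws.String("example-role"),
	}).Send(ctx)
	if err != nil {
		return err
	}
	if last := resp.Role.RoleLastUsed; last != nil && last.LastUsedDate != nil {
		fmt.Printf("role last used %v in %s\n", *last.LastUsedDate, aws.StringValue(last.Region))
	}
	return nil
}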
+type RoleLastUsed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601) + // that the role was last used. + // + // This field is null if the role has not been used within the IAM tracking + // period. For more information about the tracking period, see Regions Where + // Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period) + // in the IAM User Guide. + LastUsedDate *time.Time `type:"timestamp"` + + // The name of the AWS Region in which the role was last used. + Region *string `type:"string"` +} + +// String returns the string representation +func (s RoleLastUsed) String() string { + return awsutil.Prettify(s) +} + // An object that contains details about how a service-linked role is used, // if that information is returned by the service. // @@ -1480,7 +1529,7 @@ type ServerCertificateMetadata struct { // The Amazon Resource Name (ARN) specifying the server certificate. For more // information about ARNs and how to use them in policies, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -1490,14 +1539,14 @@ type ServerCertificateMetadata struct { // The path to the server certificate. For more information about paths, see // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Path is a required field Path *string `min:"1" type:"string" required:"true"` // The stable and unique string identifying the server certificate. For more // information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // ServerCertificateId is a required field ServerCertificateId *string `min:"16" type:"string" required:"true"` @@ -1791,7 +1840,7 @@ type User struct { // The Amazon Resource Name (ARN) that identifies the user. For more information // about ARNs and how to use ARNs in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -1806,7 +1855,7 @@ type User struct { // when the user's password was last used to sign in to an AWS website. For // a list of AWS websites that capture a user's last sign-in time, see the Credential // Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) - // topic in the Using IAM guide. If a password is used more than once in a five-minute + // topic in the IAM User Guide. If a password is used more than once in a five-minute // span, only the first use is returned in this field. If the field is null // (no value), then it indicates that they never signed in with a password. // This can be because: @@ -1817,7 +1866,7 @@ type User struct { // information on October 20, 2014. // // A null value does not mean that the user never had a password. Also, if the - // user does not currently have a password, but had one in the past, then this + // user does not currently have a password but had one in the past, then this // field contains the date and time the most recent password was used. 
// // This value is returned only in the GetUser and ListUsers operations. @@ -1825,7 +1874,7 @@ type User struct { // The path to the user. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // Path is a required field Path *string `min:"1" type:"string" required:"true"` @@ -1844,7 +1893,7 @@ type User struct { // The stable and unique string identifying the user. For more information about // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // UserId is a required field UserId *string `min:"16" type:"string" required:"true"` @@ -1887,7 +1936,7 @@ type UserDetail struct { // The path to the user. For more information about paths, see IAM Identifiers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. Path *string `min:"1" type:"string"` // The ARN of the policy used to set the permissions boundary for the user. @@ -1904,7 +1953,7 @@ type UserDetail struct { // The stable and unique string identifying the user. For more information about // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. UserId *string `min:"16" type:"string"` // The friendly name identifying the user. diff --git a/service/iot/api_enums.go b/service/iot/api_enums.go index aded2bb0355..89362c2d094 100644 --- a/service/iot/api_enums.go +++ b/service/iot/api_enums.go @@ -456,6 +456,24 @@ func (enum EventType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type FieldType string + +// Enum values for FieldType +const ( + FieldTypeNumber FieldType = "Number" + FieldTypeString FieldType = "String" + FieldTypeBoolean FieldType = "Boolean" +) + +func (enum FieldType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum FieldType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type IndexStatus string // Enum values for IndexStatus @@ -772,6 +790,25 @@ func (enum ThingIndexingMode) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type TopicRuleDestinationStatus string + +// Enum values for TopicRuleDestinationStatus +const ( + TopicRuleDestinationStatusEnabled TopicRuleDestinationStatus = "ENABLED" + TopicRuleDestinationStatusInProgress TopicRuleDestinationStatus = "IN_PROGRESS" + TopicRuleDestinationStatusDisabled TopicRuleDestinationStatus = "DISABLED" + TopicRuleDestinationStatusError TopicRuleDestinationStatus = "ERROR" +) + +func (enum TopicRuleDestinationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TopicRuleDestinationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ViolationEventType string // Enum values for ViolationEventType diff --git a/service/iot/api_op_AttachThingPrincipal.go b/service/iot/api_op_AttachThingPrincipal.go index 066a855fb8d..77712d3c302 100644 --- a/service/iot/api_op_AttachThingPrincipal.go +++ b/service/iot/api_op_AttachThingPrincipal.go @@ -14,7 +14,8 @@ import ( type AttachThingPrincipalInput struct { _ struct{} `type:"structure"` - // The principal, such as a certificate or other credential. 
+ // The principal, which can be a certificate ARN (as returned from the CreateCertificate + // operation) or an Amazon Cognito ID. // // Principal is a required field Principal *string `location:"header" locationName:"x-amzn-principal" type:"string" required:"true"` diff --git a/service/iot/api_op_ConfirmTopicRuleDestination.go b/service/iot/api_op_ConfirmTopicRuleDestination.go new file mode 100644 index 00000000000..c428c29ed3c --- /dev/null +++ b/service/iot/api_op_ConfirmTopicRuleDestination.go @@ -0,0 +1,139 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ConfirmTopicRuleDestinationInput struct { + _ struct{} `type:"structure"` + + // The token used to confirm ownership or access to the topic rule confirmation + // URL. + // + // ConfirmationToken is a required field + ConfirmationToken *string `location:"uri" locationName:"confirmationToken" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ConfirmTopicRuleDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfirmTopicRuleDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ConfirmTopicRuleDestinationInput"} + + if s.ConfirmationToken == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfirmationToken")) + } + if s.ConfirmationToken != nil && len(*s.ConfirmationToken) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConfirmationToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ConfirmTopicRuleDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfirmationToken != nil { + v := *s.ConfirmationToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "confirmationToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ConfirmTopicRuleDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ConfirmTopicRuleDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ConfirmTopicRuleDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opConfirmTopicRuleDestination = "ConfirmTopicRuleDestination" + +// ConfirmTopicRuleDestinationRequest returns a request value for making API operation for +// AWS IoT. +// +// Confirms a topic rule destination. When you create a rule requiring a destination, +// AWS IoT sends a confirmation message to the endpoint or base address you +// specify. The message includes a token which you pass back when calling ConfirmTopicRuleDestination +// to confirm that you own or have access to the endpoint. +// +// // Example sending a request using ConfirmTopicRuleDestinationRequest. 
+// req := client.ConfirmTopicRuleDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) ConfirmTopicRuleDestinationRequest(input *ConfirmTopicRuleDestinationInput) ConfirmTopicRuleDestinationRequest { + op := &aws.Operation{ + Name: opConfirmTopicRuleDestination, + HTTPMethod: "GET", + HTTPPath: "/confirmdestination/{confirmationToken+}", + } + + if input == nil { + input = &ConfirmTopicRuleDestinationInput{} + } + + req := c.newRequest(op, input, &ConfirmTopicRuleDestinationOutput{}) + return ConfirmTopicRuleDestinationRequest{Request: req, Input: input, Copy: c.ConfirmTopicRuleDestinationRequest} +} + +// ConfirmTopicRuleDestinationRequest is the request type for the +// ConfirmTopicRuleDestination API operation. +type ConfirmTopicRuleDestinationRequest struct { + *aws.Request + Input *ConfirmTopicRuleDestinationInput + Copy func(*ConfirmTopicRuleDestinationInput) ConfirmTopicRuleDestinationRequest +} + +// Send marshals and sends the ConfirmTopicRuleDestination API request. +func (r ConfirmTopicRuleDestinationRequest) Send(ctx context.Context) (*ConfirmTopicRuleDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ConfirmTopicRuleDestinationResponse{ + ConfirmTopicRuleDestinationOutput: r.Request.Data.(*ConfirmTopicRuleDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ConfirmTopicRuleDestinationResponse is the response type for the +// ConfirmTopicRuleDestination API operation. +type ConfirmTopicRuleDestinationResponse struct { + *ConfirmTopicRuleDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ConfirmTopicRuleDestination request. +func (r *ConfirmTopicRuleDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_CreateStream.go b/service/iot/api_op_CreateStream.go index bc6422f6c1c..5997c733dcf 100644 --- a/service/iot/api_op_CreateStream.go +++ b/service/iot/api_op_CreateStream.go @@ -187,10 +187,6 @@ const opCreateStream = "CreateStream" // Creates a stream for delivering one or more large files in chunks over MQTT. // A stream transports data bytes in chunks or blocks packaged as MQTT messages // from a source like S3. You can have one or more files associated with a stream. -// The total size of a file associated with the stream cannot exceed more than -// 2 MB. The stream will be created with version 0. If a stream is created with -// the same streamID as a stream that existed and was deleted within last 90 -// days, we will resurrect that old stream by incrementing the version by 1. // // // Example sending a request using CreateStreamRequest. // req := client.CreateStreamRequest(params) diff --git a/service/iot/api_op_CreateTopicRuleDestination.go b/service/iot/api_op_CreateTopicRuleDestination.go new file mode 100644 index 00000000000..9a25391bd9e --- /dev/null +++ b/service/iot/api_op_CreateTopicRuleDestination.go @@ -0,0 +1,147 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateTopicRuleDestinationInput struct { + _ struct{} `type:"structure"` + + // The topic rule destination configuration. 
+ // + // DestinationConfiguration is a required field + DestinationConfiguration *TopicRuleDestinationConfiguration `locationName:"destinationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateTopicRuleDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTopicRuleDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateTopicRuleDestinationInput"} + + if s.DestinationConfiguration == nil { + invalidParams.Add(aws.NewErrParamRequired("DestinationConfiguration")) + } + if s.DestinationConfiguration != nil { + if err := s.DestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("DestinationConfiguration", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateTopicRuleDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DestinationConfiguration != nil { + v := s.DestinationConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "destinationConfiguration", v, metadata) + } + return nil +} + +type CreateTopicRuleDestinationOutput struct { + _ struct{} `type:"structure"` + + // The topic rule destination. + TopicRuleDestination *TopicRuleDestination `locationName:"topicRuleDestination" type:"structure"` +} + +// String returns the string representation +func (s CreateTopicRuleDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateTopicRuleDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.TopicRuleDestination != nil { + v := s.TopicRuleDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "topicRuleDestination", v, metadata) + } + return nil +} + +const opCreateTopicRuleDestination = "CreateTopicRuleDestination" + +// CreateTopicRuleDestinationRequest returns a request value for making API operation for +// AWS IoT. +// +// Creates a topic rule destination. The destination must be confirmed prior +// to use. +// +// // Example sending a request using CreateTopicRuleDestinationRequest. +// req := client.CreateTopicRuleDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) CreateTopicRuleDestinationRequest(input *CreateTopicRuleDestinationInput) CreateTopicRuleDestinationRequest { + op := &aws.Operation{ + Name: opCreateTopicRuleDestination, + HTTPMethod: "POST", + HTTPPath: "/destinations", + } + + if input == nil { + input = &CreateTopicRuleDestinationInput{} + } + + req := c.newRequest(op, input, &CreateTopicRuleDestinationOutput{}) + return CreateTopicRuleDestinationRequest{Request: req, Input: input, Copy: c.CreateTopicRuleDestinationRequest} +} + +// CreateTopicRuleDestinationRequest is the request type for the +// CreateTopicRuleDestination API operation. +type CreateTopicRuleDestinationRequest struct { + *aws.Request + Input *CreateTopicRuleDestinationInput + Copy func(*CreateTopicRuleDestinationInput) CreateTopicRuleDestinationRequest +} + +// Send marshals and sends the CreateTopicRuleDestination API request. 
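// A minimal hand-written sketch, not generated code, of the create-then-confirm
// flow: the destination is created here and later confirmed with the token that
// AWS IoT delivers in the confirmation message (see ConfirmTopicRuleDestination
// above). It assumes a configured *Client named client; the HTTP URL configuration
// type is assumed from the destination shapes added alongside this operation, and
// the URL and token values are placeholders.
func exampleCreateAndConfirmDestination(ctx context.Context, client *Client, confirmationToken string) error {
	createResp, err := client.CreateTopicRuleDestinationRequest(&CreateTopicRuleDestinationInput{
		DestinationConfiguration: &TopicRuleDestinationConfiguration{
			// Assumed HTTP URL configuration; the confirmation message is sent
			// to this URL.
			HttpUrlConfiguration: &HttpUrlDestinationConfiguration{
				ConfirmationUrl: aws.String("https://example.com/iot/confirm"),
			},
		},
	}).Send(ctx)
	if err != nil {
		return err
	}
	_ = createResp.TopicRuleDestination // remains IN_PROGRESS until confirmed
	_, err = client.ConfirmTopicRuleDestinationRequest(&ConfirmTopicRuleDestinationInput{
		ConfirmationToken: aws.String(confirmationToken),
	}).Send(ctx)
	return err
}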
+func (r CreateTopicRuleDestinationRequest) Send(ctx context.Context) (*CreateTopicRuleDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateTopicRuleDestinationResponse{ + CreateTopicRuleDestinationOutput: r.Request.Data.(*CreateTopicRuleDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateTopicRuleDestinationResponse is the response type for the +// CreateTopicRuleDestination API operation. +type CreateTopicRuleDestinationResponse struct { + *CreateTopicRuleDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateTopicRuleDestination request. +func (r *CreateTopicRuleDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_DeleteTopicRuleDestination.go b/service/iot/api_op_DeleteTopicRuleDestination.go new file mode 100644 index 00000000000..2801a8bf001 --- /dev/null +++ b/service/iot/api_op_DeleteTopicRuleDestination.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteTopicRuleDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic rule destination to delete. + // + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTopicRuleDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTopicRuleDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteTopicRuleDestinationInput"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteTopicRuleDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteTopicRuleDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteTopicRuleDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteTopicRuleDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteTopicRuleDestination = "DeleteTopicRuleDestination" + +// DeleteTopicRuleDestinationRequest returns a request value for making API operation for +// AWS IoT. +// +// Deletes a topic rule destination. +// +// // Example sending a request using DeleteTopicRuleDestinationRequest. 
+// req := client.DeleteTopicRuleDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) DeleteTopicRuleDestinationRequest(input *DeleteTopicRuleDestinationInput) DeleteTopicRuleDestinationRequest { + op := &aws.Operation{ + Name: opDeleteTopicRuleDestination, + HTTPMethod: "DELETE", + HTTPPath: "/destinations/{arn+}", + } + + if input == nil { + input = &DeleteTopicRuleDestinationInput{} + } + + req := c.newRequest(op, input, &DeleteTopicRuleDestinationOutput{}) + return DeleteTopicRuleDestinationRequest{Request: req, Input: input, Copy: c.DeleteTopicRuleDestinationRequest} +} + +// DeleteTopicRuleDestinationRequest is the request type for the +// DeleteTopicRuleDestination API operation. +type DeleteTopicRuleDestinationRequest struct { + *aws.Request + Input *DeleteTopicRuleDestinationInput + Copy func(*DeleteTopicRuleDestinationInput) DeleteTopicRuleDestinationRequest +} + +// Send marshals and sends the DeleteTopicRuleDestination API request. +func (r DeleteTopicRuleDestinationRequest) Send(ctx context.Context) (*DeleteTopicRuleDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteTopicRuleDestinationResponse{ + DeleteTopicRuleDestinationOutput: r.Request.Data.(*DeleteTopicRuleDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteTopicRuleDestinationResponse is the response type for the +// DeleteTopicRuleDestination API operation. +type DeleteTopicRuleDestinationResponse struct { + *DeleteTopicRuleDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteTopicRuleDestination request. +func (r *DeleteTopicRuleDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_GetCardinality.go b/service/iot/api_op_GetCardinality.go new file mode 100644 index 00000000000..1fdf56796e4 --- /dev/null +++ b/service/iot/api_op_GetCardinality.go @@ -0,0 +1,177 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetCardinalityInput struct { + _ struct{} `type:"structure"` + + // The field to aggregate. + AggregationField *string `locationName:"aggregationField" min:"1" type:"string"` + + // The name of the index to search. + IndexName *string `locationName:"indexName" min:"1" type:"string"` + + // The search query. + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` + + // The query version. + QueryVersion *string `locationName:"queryVersion" type:"string"` +} + +// String returns the string representation +func (s GetCardinalityInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetCardinalityInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetCardinalityInput"} + if s.AggregationField != nil && len(*s.AggregationField) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AggregationField", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("IndexName", 1)) + } + + if s.QueryString == nil { + invalidParams.Add(aws.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetCardinalityInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AggregationField != nil { + v := *s.AggregationField + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "aggregationField", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IndexName != nil { + v := *s.IndexName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "indexName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.QueryString != nil { + v := *s.QueryString + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "queryString", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.QueryVersion != nil { + v := *s.QueryVersion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "queryVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetCardinalityOutput struct { + _ struct{} `type:"structure"` + + // The approximate count of unique values that match the query. + Cardinality *int64 `locationName:"cardinality" type:"integer"` +} + +// String returns the string representation +func (s GetCardinalityOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetCardinalityOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Cardinality != nil { + v := *s.Cardinality + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "cardinality", protocol.Int64Value(v), metadata) + } + return nil +} + +const opGetCardinality = "GetCardinality" + +// GetCardinalityRequest returns a request value for making API operation for +// AWS IoT. +// +// Returns the approximate count of unique values that match the query. +// +// // Example sending a request using GetCardinalityRequest. +// req := client.GetCardinalityRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) GetCardinalityRequest(input *GetCardinalityInput) GetCardinalityRequest { + op := &aws.Operation{ + Name: opGetCardinality, + HTTPMethod: "POST", + HTTPPath: "/indices/cardinality", + } + + if input == nil { + input = &GetCardinalityInput{} + } + + req := c.newRequest(op, input, &GetCardinalityOutput{}) + return GetCardinalityRequest{Request: req, Input: input, Copy: c.GetCardinalityRequest} +} + +// GetCardinalityRequest is the request type for the +// GetCardinality API operation. 
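// A minimal hand-written sketch, not generated code, of calling this operation
// against the fleet index. It assumes a configured *Client named client, that
// fleet indexing is enabled, and the "context", "fmt", and
// "github.com/aws/aws-sdk-go-v2/aws" imports; the query string and aggregation
// field are placeholders.
func exampleGetCardinality(ctx context.Context, client *Client) error {
	resp, err := client.GetCardinalityRequest(&GetCardinalityInput{
		QueryString:      aws.String("thingName:device-*"),
		AggregationField: aws.String("thingTypeName"),
	}).Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println("approximate unique values:", aws.Int64Value(resp.Cardinality))
	return nil
}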
+type GetCardinalityRequest struct { + *aws.Request + Input *GetCardinalityInput + Copy func(*GetCardinalityInput) GetCardinalityRequest +} + +// Send marshals and sends the GetCardinality API request. +func (r GetCardinalityRequest) Send(ctx context.Context) (*GetCardinalityResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetCardinalityResponse{ + GetCardinalityOutput: r.Request.Data.(*GetCardinalityOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetCardinalityResponse is the response type for the +// GetCardinality API operation. +type GetCardinalityResponse struct { + *GetCardinalityOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetCardinality request. +func (r *GetCardinalityResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_GetPercentiles.go b/service/iot/api_op_GetPercentiles.go new file mode 100644 index 00000000000..d40e19959fb --- /dev/null +++ b/service/iot/api_op_GetPercentiles.go @@ -0,0 +1,207 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetPercentilesInput struct { + _ struct{} `type:"structure"` + + // The field to aggregate. + AggregationField *string `locationName:"aggregationField" min:"1" type:"string"` + + // The name of the index to search. + IndexName *string `locationName:"indexName" min:"1" type:"string"` + + // The percentile groups returned. + Percents []float64 `locationName:"percents" type:"list"` + + // The query string. + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` + + // The query version. + QueryVersion *string `locationName:"queryVersion" type:"string"` +} + +// String returns the string representation +func (s GetPercentilesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPercentilesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPercentilesInput"} + if s.AggregationField != nil && len(*s.AggregationField) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AggregationField", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("IndexName", 1)) + } + + if s.QueryString == nil { + invalidParams.Add(aws.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
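// A minimal hand-written sketch, not generated code, of populating the fields
// described above and sending the request; the operation's percentile semantics
// are documented below with GetPercentilesRequest. It assumes a configured *Client
// named client and the "context", "fmt", and "github.com/aws/aws-sdk-go-v2/aws"
// imports; the query string and aggregation field are placeholders.
func exampleGetPercentiles(ctx context.Context, client *Client) error {
	resp, err := client.GetPercentilesRequest(&GetPercentilesInput{
		QueryString:      aws.String("thingName:device-*"),
		AggregationField: aws.String("attributes.batteryLevel"),
		Percents:         []float64{10, 50, 90},
	}).Send(ctx)
	if err != nil {
		return err
	}
	// Each PercentPair maps a requested percent to its approximate value.
	fmt.Println(resp.Percentiles)
	return nil
}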
+func (s GetPercentilesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AggregationField != nil { + v := *s.AggregationField + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "aggregationField", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IndexName != nil { + v := *s.IndexName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "indexName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Percents != nil { + v := s.Percents + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "percents", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.Float64Value(v1)) + } + ls0.End() + + } + if s.QueryString != nil { + v := *s.QueryString + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "queryString", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.QueryVersion != nil { + v := *s.QueryVersion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "queryVersion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetPercentilesOutput struct { + _ struct{} `type:"structure"` + + // The percentile values of the aggregated fields. + Percentiles []PercentPair `locationName:"percentiles" type:"list"` +} + +// String returns the string representation +func (s GetPercentilesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetPercentilesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Percentiles != nil { + v := s.Percentiles + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "percentiles", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opGetPercentiles = "GetPercentiles" + +// GetPercentilesRequest returns a request value for making API operation for +// AWS IoT. +// +// Groups the aggregated values that match the query into percentile groupings. +// The default percentile groupings are: 1,5,25,50,75,95,99, although you can +// specify your own when you call GetPercentiles. This function returns a value +// for each percentile group specified (or the default percentile groupings). +// The percentile group "1" contains the aggregated field value that occurs +// in approximately one percent of the values that match the query. The percentile +// group "5" contains the aggregated field value that occurs in approximately +// five percent of the values that match the query, and so on. The result is +// an approximation, the more values that match the query, the more accurate +// the percentile values. +// +// // Example sending a request using GetPercentilesRequest. 
+// req := client.GetPercentilesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) GetPercentilesRequest(input *GetPercentilesInput) GetPercentilesRequest { + op := &aws.Operation{ + Name: opGetPercentiles, + HTTPMethod: "POST", + HTTPPath: "/indices/percentiles", + } + + if input == nil { + input = &GetPercentilesInput{} + } + + req := c.newRequest(op, input, &GetPercentilesOutput{}) + return GetPercentilesRequest{Request: req, Input: input, Copy: c.GetPercentilesRequest} +} + +// GetPercentilesRequest is the request type for the +// GetPercentiles API operation. +type GetPercentilesRequest struct { + *aws.Request + Input *GetPercentilesInput + Copy func(*GetPercentilesInput) GetPercentilesRequest +} + +// Send marshals and sends the GetPercentiles API request. +func (r GetPercentilesRequest) Send(ctx context.Context) (*GetPercentilesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPercentilesResponse{ + GetPercentilesOutput: r.Request.Data.(*GetPercentilesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPercentilesResponse is the response type for the +// GetPercentiles API operation. +type GetPercentilesResponse struct { + *GetPercentilesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPercentiles request. +func (r *GetPercentilesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_GetTopicRuleDestination.go b/service/iot/api_op_GetTopicRuleDestination.go new file mode 100644 index 00000000000..351e7b6f6ab --- /dev/null +++ b/service/iot/api_op_GetTopicRuleDestination.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetTopicRuleDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic rule destination. + // + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTopicRuleDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTopicRuleDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetTopicRuleDestinationInput"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetTopicRuleDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetTopicRuleDestinationOutput struct { + _ struct{} `type:"structure"` + + // The topic rule destination. 
+ TopicRuleDestination *TopicRuleDestination `locationName:"topicRuleDestination" type:"structure"` +} + +// String returns the string representation +func (s GetTopicRuleDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetTopicRuleDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.TopicRuleDestination != nil { + v := s.TopicRuleDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "topicRuleDestination", v, metadata) + } + return nil +} + +const opGetTopicRuleDestination = "GetTopicRuleDestination" + +// GetTopicRuleDestinationRequest returns a request value for making API operation for +// AWS IoT. +// +// Gets information about a topic rule destination. +// +// // Example sending a request using GetTopicRuleDestinationRequest. +// req := client.GetTopicRuleDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) GetTopicRuleDestinationRequest(input *GetTopicRuleDestinationInput) GetTopicRuleDestinationRequest { + op := &aws.Operation{ + Name: opGetTopicRuleDestination, + HTTPMethod: "GET", + HTTPPath: "/destinations/{arn+}", + } + + if input == nil { + input = &GetTopicRuleDestinationInput{} + } + + req := c.newRequest(op, input, &GetTopicRuleDestinationOutput{}) + return GetTopicRuleDestinationRequest{Request: req, Input: input, Copy: c.GetTopicRuleDestinationRequest} +} + +// GetTopicRuleDestinationRequest is the request type for the +// GetTopicRuleDestination API operation. +type GetTopicRuleDestinationRequest struct { + *aws.Request + Input *GetTopicRuleDestinationInput + Copy func(*GetTopicRuleDestinationInput) GetTopicRuleDestinationRequest +} + +// Send marshals and sends the GetTopicRuleDestination API request. +func (r GetTopicRuleDestinationRequest) Send(ctx context.Context) (*GetTopicRuleDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetTopicRuleDestinationResponse{ + GetTopicRuleDestinationOutput: r.Request.Data.(*GetTopicRuleDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetTopicRuleDestinationResponse is the response type for the +// GetTopicRuleDestination API operation. +type GetTopicRuleDestinationResponse struct { + *GetTopicRuleDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetTopicRuleDestination request. +func (r *GetTopicRuleDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_ListTopicRuleDestinations.go b/service/iot/api_op_ListTopicRuleDestinations.go new file mode 100644 index 00000000000..352ebee0bd7 --- /dev/null +++ b/service/iot/api_op_ListTopicRuleDestinations.go @@ -0,0 +1,162 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTopicRuleDestinationsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return at one time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token to retrieve the next set of results. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTopicRuleDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTopicRuleDestinationsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTopicRuleDestinationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTopicRuleDestinationsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "maxResults", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTopicRuleDestinationsOutput struct { + _ struct{} `type:"structure"` + + // Information about a topic rule destination. + DestinationSummaries []TopicRuleDestinationSummary `locationName:"destinationSummaries" type:"list"` + + // The token to retrieve the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListTopicRuleDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTopicRuleDestinationsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DestinationSummaries != nil { + v := s.DestinationSummaries + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "destinationSummaries", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "nextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListTopicRuleDestinations = "ListTopicRuleDestinations" + +// ListTopicRuleDestinationsRequest returns a request value for making API operation for +// AWS IoT. +// +// Lists all the topic rule destinations in your AWS account. +// +// // Example sending a request using ListTopicRuleDestinationsRequest. +// req := client.ListTopicRuleDestinationsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) ListTopicRuleDestinationsRequest(input *ListTopicRuleDestinationsInput) ListTopicRuleDestinationsRequest { + op := &aws.Operation{ + Name: opListTopicRuleDestinations, + HTTPMethod: "GET", + HTTPPath: "/destinations", + } + + if input == nil { + input = &ListTopicRuleDestinationsInput{} + } + + req := c.newRequest(op, input, &ListTopicRuleDestinationsOutput{}) + return ListTopicRuleDestinationsRequest{Request: req, Input: input, Copy: c.ListTopicRuleDestinationsRequest} +} + +// ListTopicRuleDestinationsRequest is the request type for the +// ListTopicRuleDestinations API operation. 
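+//
+// A pagination sketch (illustrative only, not generated code). It assumes an
+// already-configured client and keeps requesting pages until NextToken is nil.
+//
+//    input := &iot.ListTopicRuleDestinationsInput{MaxResults: aws.Int64(25)}
+//    for {
+//        resp, err := client.ListTopicRuleDestinationsRequest(input).Send(context.TODO())
+//        if err != nil {
+//            break // handle the error appropriately in real code
+//        }
+//        for _, d := range resp.DestinationSummaries {
+//            fmt.Println(aws.StringValue(d.Arn), d.Status)
+//        }
+//        if resp.NextToken == nil {
+//            break
+//        }
+//        input.NextToken = resp.NextToken
+//    }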
+type ListTopicRuleDestinationsRequest struct { + *aws.Request + Input *ListTopicRuleDestinationsInput + Copy func(*ListTopicRuleDestinationsInput) ListTopicRuleDestinationsRequest +} + +// Send marshals and sends the ListTopicRuleDestinations API request. +func (r ListTopicRuleDestinationsRequest) Send(ctx context.Context) (*ListTopicRuleDestinationsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTopicRuleDestinationsResponse{ + ListTopicRuleDestinationsOutput: r.Request.Data.(*ListTopicRuleDestinationsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTopicRuleDestinationsResponse is the response type for the +// ListTopicRuleDestinations API operation. +type ListTopicRuleDestinationsResponse struct { + *ListTopicRuleDestinationsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTopicRuleDestinations request. +func (r *ListTopicRuleDestinationsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_op_UpdateTopicRuleDestination.go b/service/iot/api_op_UpdateTopicRuleDestination.go new file mode 100644 index 00000000000..35da3438598 --- /dev/null +++ b/service/iot/api_op_UpdateTopicRuleDestination.go @@ -0,0 +1,172 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iot + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateTopicRuleDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN of the topic rule destination. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The status of the topic rule destination. Valid values are: + // + // IN_PROGRESS + // + // A topic rule destination was created but has not been confirmed. You can + // set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + // + // ENABLED + // + // Confirmation was completed, and traffic to this destination is allowed. You + // can set status to DISABLED by calling UpdateTopicRuleDestination. + // + // DISABLED + // + // Confirmation was completed, and traffic to this destination is not allowed. + // You can set status to ENABLED by calling UpdateTopicRuleDestination. + // + // ERROR + // + // Confirmation could not be completed, for example if the confirmation timed + // out. You can call GetTopicRuleDestination for details about the error. You + // can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + // + // Status is a required field + Status TopicRuleDestinationStatus `locationName:"status" type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s UpdateTopicRuleDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateTopicRuleDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateTopicRuleDestinationInput"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateTopicRuleDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +type UpdateTopicRuleDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateTopicRuleDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateTopicRuleDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUpdateTopicRuleDestination = "UpdateTopicRuleDestination" + +// UpdateTopicRuleDestinationRequest returns a request value for making API operation for +// AWS IoT. +// +// Updates a topic rule destination. You use this to change the status, endpoint +// URL, or confirmation URL of the destination. +// +// // Example sending a request using UpdateTopicRuleDestinationRequest. +// req := client.UpdateTopicRuleDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +func (c *Client) UpdateTopicRuleDestinationRequest(input *UpdateTopicRuleDestinationInput) UpdateTopicRuleDestinationRequest { + op := &aws.Operation{ + Name: opUpdateTopicRuleDestination, + HTTPMethod: "PATCH", + HTTPPath: "/destinations", + } + + if input == nil { + input = &UpdateTopicRuleDestinationInput{} + } + + req := c.newRequest(op, input, &UpdateTopicRuleDestinationOutput{}) + return UpdateTopicRuleDestinationRequest{Request: req, Input: input, Copy: c.UpdateTopicRuleDestinationRequest} +} + +// UpdateTopicRuleDestinationRequest is the request type for the +// UpdateTopicRuleDestination API operation. +type UpdateTopicRuleDestinationRequest struct { + *aws.Request + Input *UpdateTopicRuleDestinationInput + Copy func(*UpdateTopicRuleDestinationInput) UpdateTopicRuleDestinationRequest +} + +// Send marshals and sends the UpdateTopicRuleDestination API request. +func (r UpdateTopicRuleDestinationRequest) Send(ctx context.Context) (*UpdateTopicRuleDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateTopicRuleDestinationResponse{ + UpdateTopicRuleDestinationOutput: r.Request.Data.(*UpdateTopicRuleDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateTopicRuleDestinationResponse is the response type for the +// UpdateTopicRuleDestination API operation. 
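+//
+// An illustrative sketch (not generated code) of re-sending the confirmation
+// challenge by moving a destination back to IN_PROGRESS. The destination ARN
+// is a placeholder, and the status is written as a string conversion because
+// the generated enum constants are defined elsewhere.
+//
+//    req := client.UpdateTopicRuleDestinationRequest(&iot.UpdateTopicRuleDestinationInput{
+//        Arn:    aws.String(destinationArn),
+//        Status: iot.TopicRuleDestinationStatus("IN_PROGRESS"),
+//    })
+//    _, err := req.Send(context.TODO())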
+type UpdateTopicRuleDestinationResponse struct { + *UpdateTopicRuleDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateTopicRuleDestination request. +func (r *UpdateTopicRuleDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/iot/api_types.go b/service/iot/api_types.go index cbfdc072c70..a18551c1a11 100644 --- a/service/iot/api_types.go +++ b/service/iot/api_types.go @@ -184,6 +184,9 @@ type Action struct { // Write to an Amazon Kinesis Firehose stream. Firehose *FirehoseAction `locationName:"firehose" type:"structure"` + // Send data to an HTTPS endpoint. + Http *HttpAction `locationName:"http" type:"structure"` + // Sends message data to an AWS IoT Analytics channel. IotAnalytics *IotAnalyticsAction `locationName:"iotAnalytics" type:"structure"` @@ -253,6 +256,11 @@ func (s *Action) Validate() error { invalidParams.AddNested("Firehose", err.(aws.ErrInvalidParams)) } } + if s.Http != nil { + if err := s.Http.Validate(); err != nil { + invalidParams.AddNested("Http", err.(aws.ErrInvalidParams)) + } + } if s.IotEvents != nil { if err := s.IotEvents.Validate(); err != nil { invalidParams.AddNested("IotEvents", err.(aws.ErrInvalidParams)) @@ -343,6 +351,12 @@ func (s Action) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "firehose", v, metadata) } + if s.Http != nil { + v := s.Http + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "http", v, metadata) + } if s.IotAnalytics != nil { v := s.IotAnalytics @@ -3131,6 +3145,39 @@ func (s ExponentialRolloutRate) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The field to aggregate. +type Field struct { + _ struct{} `type:"structure"` + + // The name of the field. + Name *string `locationName:"name" type:"string"` + + // The data type of the field. + Type FieldType `locationName:"type" type:"string" enum:"true"` +} + +// String returns the string representation +func (s Field) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Field) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + // The location of the OTA update. type FileLocation struct { _ struct{} `type:"structure"` @@ -3283,6 +3330,286 @@ func (s GroupNameAndArn) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Send data to an HTTPS endpoint. +type HttpAction struct { + _ struct{} `type:"structure"` + + // The authentication method to use when sending data to an HTTPS endpoint. + Auth *HttpAuthorization `locationName:"auth" type:"structure"` + + // The URL to which AWS IoT sends a confirmation message. The value of the confirmation + // URL must be a prefix of the endpoint URL. If you do not specify a confirmation + // URL AWS IoT uses the endpoint URL as the confirmation URL. 
If you use substitution + // templates in the confirmationUrl, you must create and enable topic rule destinations + // that match each possible value of the substituion template before traffic + // is allowed to your endpoint URL. + ConfirmationUrl *string `locationName:"confirmationUrl" type:"string"` + + // The HTTP headers to send with the message data. + Headers []HttpActionHeader `locationName:"headers" type:"list"` + + // The endpoint URL. If substitution templates are used in the URL, you must + // also specify a confirmationUrl. If this is a new destination, a new TopicRuleDestination + // is created if possible. + // + // Url is a required field + Url *string `locationName:"url" type:"string" required:"true"` +} + +// String returns the string representation +func (s HttpAction) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpAction) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "HttpAction"} + + if s.Url == nil { + invalidParams.Add(aws.NewErrParamRequired("Url")) + } + if s.Auth != nil { + if err := s.Auth.Validate(); err != nil { + invalidParams.AddNested("Auth", err.(aws.ErrInvalidParams)) + } + } + if s.Headers != nil { + for i, v := range s.Headers { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HttpAction) MarshalFields(e protocol.FieldEncoder) error { + if s.Auth != nil { + v := s.Auth + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "auth", v, metadata) + } + if s.ConfirmationUrl != nil { + v := *s.ConfirmationUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "confirmationUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Headers != nil { + v := s.Headers + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "headers", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Url != nil { + v := *s.Url + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "url", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The HTTP action header. +type HttpActionHeader struct { + _ struct{} `type:"structure"` + + // The HTTP header key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The HTTP header value. Substitution templates are supported. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s HttpActionHeader) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
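+//
+// For example (illustrative only), a header with an empty key and no value
+// fails validation with both a minimum-length and a required-field error:
+//
+//    h := iot.HttpActionHeader{Key: aws.String("")}
+//    if err := h.Validate(); err != nil {
+//        fmt.Println(err) // reports Key (min length 1) and Value (required)
+//    }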
+func (s *HttpActionHeader) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "HttpActionHeader"} + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Key", 1)) + } + + if s.Value == nil { + invalidParams.Add(aws.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HttpActionHeader) MarshalFields(e protocol.FieldEncoder) error { + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "value", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The authorization method used to send messages. +type HttpAuthorization struct { + _ struct{} `type:"structure"` + + // Use Sig V4 authorization. For more information, see Signature Version 4 Signing + // Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). + Sigv4 *SigV4Authorization `locationName:"sigv4" type:"structure"` +} + +// String returns the string representation +func (s HttpAuthorization) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpAuthorization) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "HttpAuthorization"} + if s.Sigv4 != nil { + if err := s.Sigv4.Validate(); err != nil { + invalidParams.AddNested("Sigv4", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HttpAuthorization) MarshalFields(e protocol.FieldEncoder) error { + if s.Sigv4 != nil { + v := s.Sigv4 + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "sigv4", v, metadata) + } + return nil +} + +// HTTP URL destination configuration used by the topic rule's HTTP action. +type HttpUrlDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The URL AWS IoT uses to confirm ownership of or access to the topic rule + // destination URL. + // + // ConfirmationUrl is a required field + ConfirmationUrl *string `locationName:"confirmationUrl" type:"string" required:"true"` +} + +// String returns the string representation +func (s HttpUrlDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpUrlDestinationConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "HttpUrlDestinationConfiguration"} + + if s.ConfirmationUrl == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfirmationUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
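+//
+// For illustration only (the URL is a placeholder), a configuration such as
+//
+//    iot.HttpUrlDestinationConfiguration{ConfirmationUrl: aws.String("https://example.com/confirm")}
+//
+// is encoded into the request body as
+//
+//    {"confirmationUrl":"https://example.com/confirm"}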
+func (s HttpUrlDestinationConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.ConfirmationUrl != nil { + v := *s.ConfirmationUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "confirmationUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// HTTP URL destination properties. +type HttpUrlDestinationProperties struct { + _ struct{} `type:"structure"` + + // The URL used to confirm the HTTP topic rule destination URL. + ConfirmationUrl *string `locationName:"confirmationUrl" type:"string"` +} + +// String returns the string representation +func (s HttpUrlDestinationProperties) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HttpUrlDestinationProperties) MarshalFields(e protocol.FieldEncoder) error { + if s.ConfirmationUrl != nil { + v := *s.ConfirmationUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "confirmationUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about an HTTP URL destination. +type HttpUrlDestinationSummary struct { + _ struct{} `type:"structure"` + + // The URL used to confirm ownership of or access to the HTTP topic rule destination + // URL. + ConfirmationUrl *string `locationName:"confirmationUrl" type:"string"` +} + +// String returns the string representation +func (s HttpUrlDestinationSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HttpUrlDestinationSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.ConfirmationUrl != nil { + v := *s.ConfirmationUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "confirmationUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Information that implicitly denies authorization. When policy doesn't explicitly // deny or allow an action on a resource it is considered an implicit deny. type ImplicitDeny struct { @@ -5109,6 +5436,39 @@ func (s OutgoingCertificate) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Describes the percentile and percentile value. +type PercentPair struct { + _ struct{} `type:"structure"` + + // The percentile. + Percent *float64 `locationName:"percent" type:"double"` + + // The value of the percentile. + Value *float64 `locationName:"value" type:"double"` +} + +// String returns the string representation +func (s PercentPair) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PercentPair) MarshalFields(e protocol.FieldEncoder) error { + if s.Percent != nil { + v := *s.Percent + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "percent", protocol.Float64Value(v), metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "value", protocol.Float64Value(v), metadata) + } + return nil +} + // Describes an AWS IoT policy. type Policy struct { _ struct{} `type:"structure"` @@ -5557,7 +5917,8 @@ func (s ReplaceDefaultPolicyVersionParams) MarshalFields(e protocol.FieldEncoder type RepublishAction struct { _ struct{} `type:"structure"` - // The Quality of Service (QoS) level to use when republishing messages. 
+ // The Quality of Service (QoS) level to use when republishing messages. The + // default value is 0. Qos *int64 `locationName:"qos" type:"integer"` // The ARN of the IAM role that grants access. @@ -6194,6 +6555,76 @@ func (s SecurityProfileTargetMapping) MarshalFields(e protocol.FieldEncoder) err return nil } +// Use Sig V4 authorization. +type SigV4Authorization struct { + _ struct{} `type:"structure"` + + // The ARN of the signing role. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The service name to use while signing with Sig V4. + // + // ServiceName is a required field + ServiceName *string `locationName:"serviceName" type:"string" required:"true"` + + // The signing region. + // + // SigningRegion is a required field + SigningRegion *string `locationName:"signingRegion" type:"string" required:"true"` +} + +// String returns the string representation +func (s SigV4Authorization) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SigV4Authorization) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SigV4Authorization"} + + if s.RoleArn == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleArn")) + } + + if s.ServiceName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceName")) + } + + if s.SigningRegion == nil { + invalidParams.Add(aws.NewErrParamRequired("SigningRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SigV4Authorization) MarshalFields(e protocol.FieldEncoder) error { + if s.RoleArn != nil { + v := *s.RoleArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "roleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ServiceName != nil { + v := *s.ServiceName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "serviceName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SigningRegion != nil { + v := *s.SigningRegion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "signingRegion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Describes the code-signing profile. type SigningProfileParameter struct { _ struct{} `type:"structure"` @@ -6463,8 +6894,33 @@ func (s StatisticalThreshold) MarshalFields(e protocol.FieldEncoder) error { type Statistics struct { _ struct{} `type:"structure"` + // The average of the aggregated fields. If the field data type is String this + // value is indeterminate. + Average *float64 `locationName:"average" type:"double"` + // The count of things that match the query. Count *int64 `locationName:"count" type:"integer"` + + // The maximum value of the aggregated fields. If the field data type is String + // this value is indeterminate. + Maximum *float64 `locationName:"maximum" type:"double"` + + // The minimum value of the aggregated fields. If the field data type is String + // this value is indeterminate. + Minimum *float64 `locationName:"minimum" type:"double"` + + // The standard deviation of the aggregated field values. + StdDeviation *float64 `locationName:"stdDeviation" type:"double"` + + // The sum of the aggregated fields. If the field data type is String this value + // is indeterminate. 
+ Sum *float64 `locationName:"sum" type:"double"` + + // The sum of the squares of the aggregated field values. + SumOfSquares *float64 `locationName:"sumOfSquares" type:"double"` + + // The variance of the aggregated field values. + Variance *float64 `locationName:"variance" type:"double"` } // String returns the string representation @@ -6474,12 +6930,54 @@ func (s Statistics) String() string { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s Statistics) MarshalFields(e protocol.FieldEncoder) error { + if s.Average != nil { + v := *s.Average + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "average", protocol.Float64Value(v), metadata) + } if s.Count != nil { v := *s.Count metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "count", protocol.Int64Value(v), metadata) } + if s.Maximum != nil { + v := *s.Maximum + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maximum", protocol.Float64Value(v), metadata) + } + if s.Minimum != nil { + v := *s.Minimum + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "minimum", protocol.Float64Value(v), metadata) + } + if s.StdDeviation != nil { + v := *s.StdDeviation + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "stdDeviation", protocol.Float64Value(v), metadata) + } + if s.Sum != nil { + v := *s.Sum + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "sum", protocol.Float64Value(v), metadata) + } + if s.SumOfSquares != nil { + v := *s.SumOfSquares + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "sumOfSquares", protocol.Float64Value(v), metadata) + } + if s.Variance != nil { + v := *s.Variance + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "variance", protocol.Float64Value(v), metadata) + } return nil } @@ -7234,6 +7732,13 @@ func (s ThingGroupDocument) MarshalFields(e protocol.FieldEncoder) error { type ThingGroupIndexingConfiguration struct { _ struct{} `type:"structure"` + // A list of thing group fields to index. This list cannot contain any managed + // fields. Use the GetIndexingConfiguration API to get a list of managed fields. + CustomFields []Field `locationName:"customFields" type:"list"` + + // A list of automatically indexed thing group fields. + ManagedFields []Field `locationName:"managedFields" type:"list"` + // Thing group indexing mode. // // ThingGroupIndexingMode is a required field @@ -7260,6 +7765,30 @@ func (s *ThingGroupIndexingConfiguration) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s ThingGroupIndexingConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomFields != nil { + v := s.CustomFields + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "customFields", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.ManagedFields != nil { + v := s.ManagedFields + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "managedFields", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if len(s.ThingGroupIndexingMode) > 0 { v := s.ThingGroupIndexingMode @@ -7356,6 +7885,13 @@ func (s ThingGroupProperties) MarshalFields(e protocol.FieldEncoder) error { type ThingIndexingConfiguration struct { _ struct{} `type:"structure"` + // A list of thing fields to index. This list cannot contain any managed fields. 
+ // Use the GetIndexingConfiguration API to get a list of managed fields. + CustomFields []Field `locationName:"customFields" type:"list"` + + // A list of automatically indexed thing fields. + ManagedFields []Field `locationName:"managedFields" type:"list"` + // Thing connectivity indexing mode. Valid values are: // // * STATUS – Your thing index contains connectivity status. To enable @@ -7397,6 +7933,30 @@ func (s *ThingIndexingConfiguration) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s ThingIndexingConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomFields != nil { + v := s.CustomFields + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "customFields", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.ManagedFields != nil { + v := s.ManagedFields + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "managedFields", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if len(s.ThingConnectivityIndexingMode) > 0 { v := s.ThingConnectivityIndexingMode @@ -7679,6 +8239,198 @@ func (s TopicRule) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A topic rule destination. +type TopicRuleDestination struct { + _ struct{} `type:"structure"` + + // The topic rule destination URL. + Arn *string `locationName:"arn" type:"string"` + + // Properties of the HTTP URL. + HttpUrlProperties *HttpUrlDestinationProperties `locationName:"httpUrlProperties" type:"structure"` + + // The status of the topic rule destination. Valid values are: + // + // IN_PROGRESS + // + // A topic rule destination was created but has not been confirmed. You can + // set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + // + // ENABLED + // + // Confirmation was completed, and traffic to this destination is allowed. You + // can set status to DISABLED by calling UpdateTopicRuleDestination. + // + // DISABLED + // + // Confirmation was completed, and traffic to this destination is not allowed. + // You can set status to ENABLED by calling UpdateTopicRuleDestination. + // + // ERROR + // + // Confirmation could not be completed, for example if the confirmation timed + // out. You can call GetTopicRuleDestination for details about the error. You + // can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + Status TopicRuleDestinationStatus `locationName:"status" type:"string" enum:"true"` + + // Additional details or reason why the topic rule destination is in the current + // status. + StatusReason *string `locationName:"statusReason" type:"string"` +} + +// String returns the string representation +func (s TopicRuleDestination) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s TopicRuleDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.HttpUrlProperties != nil { + v := s.HttpUrlProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "httpUrlProperties", v, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.StatusReason != nil { + v := *s.StatusReason + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "statusReason", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Configuration of the topic rule destination. +type TopicRuleDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration of the HTTP URL. + HttpUrlConfiguration *HttpUrlDestinationConfiguration `locationName:"httpUrlConfiguration" type:"structure"` +} + +// String returns the string representation +func (s TopicRuleDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicRuleDestinationConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TopicRuleDestinationConfiguration"} + if s.HttpUrlConfiguration != nil { + if err := s.HttpUrlConfiguration.Validate(); err != nil { + invalidParams.AddNested("HttpUrlConfiguration", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TopicRuleDestinationConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.HttpUrlConfiguration != nil { + v := s.HttpUrlConfiguration + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "httpUrlConfiguration", v, metadata) + } + return nil +} + +// Information about the topic rule destination. +type TopicRuleDestinationSummary struct { + _ struct{} `type:"structure"` + + // The topic rule destination ARN. + Arn *string `locationName:"arn" type:"string"` + + // Information about the HTTP URL. + HttpUrlSummary *HttpUrlDestinationSummary `locationName:"httpUrlSummary" type:"structure"` + + // The status of the topic rule destination. Valid values are: + // + // IN_PROGRESS + // + // A topic rule destination was created but has not been confirmed. You can + // set status to IN_PROGRESS by calling UpdateTopicRuleDestination. Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + // + // ENABLED + // + // Confirmation was completed, and traffic to this destination is allowed. You + // can set status to DISABLED by calling UpdateTopicRuleDestination. + // + // DISABLED + // + // Confirmation was completed, and traffic to this destination is not allowed. + // You can set status to ENABLED by calling UpdateTopicRuleDestination. + // + // ERROR + // + // Confirmation could not be completed, for example if the confirmation timed + // out. You can call GetTopicRuleDestination for details about the error. You + // can set status to IN_PROGRESS by calling UpdateTopicRuleDestination. 
Calling + // UpdateTopicRuleDestination causes a new confirmation challenge to be sent + // to your confirmation endpoint. + Status TopicRuleDestinationStatus `locationName:"status" type:"string" enum:"true"` + + // The reason the topic rule destination is in the current status. + StatusReason *string `locationName:"statusReason" type:"string"` +} + +// String returns the string representation +func (s TopicRuleDestinationSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TopicRuleDestinationSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.HttpUrlSummary != nil { + v := s.HttpUrlSummary + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "httpUrlSummary", v, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.StatusReason != nil { + v := *s.StatusReason + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "statusReason", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Describes a rule. type TopicRuleListItem struct { _ struct{} `type:"structure"` diff --git a/service/iot/iotiface/interface.go b/service/iot/iotiface/interface.go index 9916b2fc183..ea8ad9d54bf 100644 --- a/service/iot/iotiface/interface.go +++ b/service/iot/iotiface/interface.go @@ -89,6 +89,8 @@ type ClientAPI interface { ClearDefaultAuthorizerRequest(*iot.ClearDefaultAuthorizerInput) iot.ClearDefaultAuthorizerRequest + ConfirmTopicRuleDestinationRequest(*iot.ConfirmTopicRuleDestinationInput) iot.ConfirmTopicRuleDestinationRequest + CreateAuthorizerRequest(*iot.CreateAuthorizerInput) iot.CreateAuthorizerRequest CreateBillingGroupRequest(*iot.CreateBillingGroupInput) iot.CreateBillingGroupRequest @@ -125,6 +127,8 @@ type ClientAPI interface { CreateTopicRuleRequest(*iot.CreateTopicRuleInput) iot.CreateTopicRuleRequest + CreateTopicRuleDestinationRequest(*iot.CreateTopicRuleDestinationInput) iot.CreateTopicRuleDestinationRequest + DeleteAccountAuditConfigurationRequest(*iot.DeleteAccountAuditConfigurationInput) iot.DeleteAccountAuditConfigurationRequest DeleteAuthorizerRequest(*iot.DeleteAuthorizerInput) iot.DeleteAuthorizerRequest @@ -167,6 +171,8 @@ type ClientAPI interface { DeleteTopicRuleRequest(*iot.DeleteTopicRuleInput) iot.DeleteTopicRuleRequest + DeleteTopicRuleDestinationRequest(*iot.DeleteTopicRuleDestinationInput) iot.DeleteTopicRuleDestinationRequest + DeleteV2LoggingLevelRequest(*iot.DeleteV2LoggingLevelInput) iot.DeleteV2LoggingLevelRequest DeprecateThingTypeRequest(*iot.DeprecateThingTypeInput) iot.DeprecateThingTypeRequest @@ -229,6 +235,8 @@ type ClientAPI interface { EnableTopicRuleRequest(*iot.EnableTopicRuleInput) iot.EnableTopicRuleRequest + GetCardinalityRequest(*iot.GetCardinalityInput) iot.GetCardinalityRequest + GetEffectivePoliciesRequest(*iot.GetEffectivePoliciesInput) iot.GetEffectivePoliciesRequest GetIndexingConfigurationRequest(*iot.GetIndexingConfigurationInput) iot.GetIndexingConfigurationRequest @@ -239,6 +247,8 @@ type ClientAPI interface { GetOTAUpdateRequest(*iot.GetOTAUpdateInput) iot.GetOTAUpdateRequest + GetPercentilesRequest(*iot.GetPercentilesInput) 
iot.GetPercentilesRequest + GetPolicyRequest(*iot.GetPolicyInput) iot.GetPolicyRequest GetPolicyVersionRequest(*iot.GetPolicyVersionInput) iot.GetPolicyVersionRequest @@ -249,6 +259,8 @@ type ClientAPI interface { GetTopicRuleRequest(*iot.GetTopicRuleInput) iot.GetTopicRuleRequest + GetTopicRuleDestinationRequest(*iot.GetTopicRuleDestinationInput) iot.GetTopicRuleDestinationRequest + GetV2LoggingOptionsRequest(*iot.GetV2LoggingOptionsInput) iot.GetV2LoggingOptionsRequest ListActiveViolationsRequest(*iot.ListActiveViolationsInput) iot.ListActiveViolationsRequest @@ -331,6 +343,8 @@ type ClientAPI interface { ListThingsInThingGroupRequest(*iot.ListThingsInThingGroupInput) iot.ListThingsInThingGroupRequest + ListTopicRuleDestinationsRequest(*iot.ListTopicRuleDestinationsInput) iot.ListTopicRuleDestinationsRequest + ListTopicRulesRequest(*iot.ListTopicRulesInput) iot.ListTopicRulesRequest ListV2LoggingLevelsRequest(*iot.ListV2LoggingLevelsInput) iot.ListV2LoggingLevelsRequest @@ -415,6 +429,8 @@ type ClientAPI interface { UpdateThingGroupsForThingRequest(*iot.UpdateThingGroupsForThingInput) iot.UpdateThingGroupsForThingRequest + UpdateTopicRuleDestinationRequest(*iot.UpdateTopicRuleDestinationInput) iot.UpdateTopicRuleDestinationRequest + ValidateSecurityProfileBehaviorsRequest(*iot.ValidateSecurityProfileBehaviorsInput) iot.ValidateSecurityProfileBehaviorsRequest } diff --git a/service/lambda/api_enums.go b/service/lambda/api_enums.go index 321638d386e..d8e14e3af70 100644 --- a/service/lambda/api_enums.go +++ b/service/lambda/api_enums.go @@ -80,10 +80,13 @@ const ( RuntimeNodejs610 Runtime = "nodejs6.10" RuntimeNodejs810 Runtime = "nodejs8.10" RuntimeNodejs10X Runtime = "nodejs10.x" + RuntimeNodejs12X Runtime = "nodejs12.x" RuntimeJava8 Runtime = "java8" + RuntimeJava11 Runtime = "java11" RuntimePython27 Runtime = "python2.7" RuntimePython36 Runtime = "python3.6" RuntimePython37 Runtime = "python3.7" + RuntimePython38 Runtime = "python3.8" RuntimeDotnetcore10 Runtime = "dotnetcore1.0" RuntimeDotnetcore20 Runtime = "dotnetcore2.0" RuntimeDotnetcore21 Runtime = "dotnetcore2.1" diff --git a/service/marketplacemetering/api_doc.go b/service/marketplacemetering/api_doc.go index 482c9d764a7..046875f7bb4 100644 --- a/service/marketplacemetering/api_doc.go +++ b/service/marketplacemetering/api_doc.go @@ -29,11 +29,10 @@ // // * Paid container software products sold through AWS Marketplace must integrate // with the AWS Marketplace Metering Service and call the RegisterUsage operation -// for software entitlement and metering. Calling RegisterUsage from containers -// running outside of Amazon Elastic Container Service (Amazon ECR) isn't -// supported. Free and BYOL products for ECS aren't required to call RegisterUsage, -// but you can do so if you want to receive usage data in your seller reports. -// For more information on using the RegisterUsage operation, see Container-Based +// for software entitlement and metering. Free and BYOL products for Amazon +// ECS or Amazon EKS aren't required to call RegisterUsage, but you can do +// so if you want to receive usage data in your seller reports. For more +// information on using the RegisterUsage operation, see Container-Based // Products (https://docs.aws.amazon.com/marketplace/latest/userguide/container-based-products.html). // // BatchMeterUsage API calls are captured by AWS CloudTrail. 
You can use Cloudtrail diff --git a/service/marketplacemetering/api_op_RegisterUsage.go b/service/marketplacemetering/api_op_RegisterUsage.go index 93a35fa541d..db1cb9e8627 100644 --- a/service/marketplacemetering/api_op_RegisterUsage.go +++ b/service/marketplacemetering/api_op_RegisterUsage.go @@ -81,12 +81,11 @@ const opRegisterUsage = "RegisterUsage" // // Paid container software products sold through AWS Marketplace must integrate // with the AWS Marketplace Metering Service and call the RegisterUsage operation -// for software entitlement and metering. Calling RegisterUsage from containers -// running outside of ECS is not currently supported. Free and BYOL products -// for ECS aren't required to call RegisterUsage, but you may choose to do so -// if you would like to receive usage data in your seller reports. The sections -// below explain the behavior of RegisterUsage. RegisterUsage performs two primary -// functions: metering and entitlement. +// for software entitlement and metering. Free and BYOL products for Amazon +// ECS or Amazon EKS aren't required to call RegisterUsage, but you may choose +// to do so if you would like to receive usage data in your seller reports. +// The sections below explain the behavior of RegisterUsage. RegisterUsage performs +// two primary functions: metering and entitlement. // // * Entitlement: RegisterUsage allows you to verify that the customer running // your paid software is subscribed to your product on AWS Marketplace, enabling @@ -94,22 +93,23 @@ const opRegisterUsage = "RegisterUsage" // with RegisterUsage is only required to guard against unauthorized use // at container startup, as such a CustomerNotSubscribedException/PlatformNotSupportedException // will only be thrown on the initial call to RegisterUsage. Subsequent calls -// from the same Amazon ECS task instance (e.g. task-id) will not throw a -// CustomerNotSubscribedException, even if the customer unsubscribes while -// the Amazon ECS task is still running. +// from the same Amazon ECS task instance (e.g. task-id) or Amazon EKS pod +// will not throw a CustomerNotSubscribedException, even if the customer +// unsubscribes while the Amazon ECS task or Amazon EKS pod is still running. // // * Metering: RegisterUsage meters software use per ECS task, per hour, -// with usage prorated to the second. A minimum of 1 minute of usage applies -// to tasks that are short lived. For example, if a customer has a 10 node -// ECS cluster and creates an ECS service configured as a Daemon Set, then -// ECS will launch a task on all 10 cluster nodes and the customer will be -// charged: (10 * hourly_rate). Metering for software use is automatically -// handled by the AWS Marketplace Metering Control Plane -- your software -// is not required to perform any metering specific actions, other than call -// RegisterUsage once for metering of software use to commence. The AWS Marketplace -// Metering Control Plane will also continue to bill customers for running -// ECS tasks, regardless of the customers subscription state, removing the -// need for your software to perform entitlement checks at runtime. +// or per pod for Amazon EKS with usage prorated to the second. A minimum +// of 1 minute of usage applies to tasks that are short lived. For example, +// if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a service +// configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch +// a task on all 10 cluster nodes and the customer will be charged: (10 * +// hourly_rate). 
Metering for software use is automatically handled by the +// AWS Marketplace Metering Control Plane -- your software is not required +// to perform any metering specific actions, other than call RegisterUsage +// once for metering of software use to commence. The AWS Marketplace Metering +// Control Plane will also continue to bill customers for running ECS tasks +// and Amazon EKS pods, regardless of the customers subscription state, removing +// the need for your software to perform entitlement checks at runtime. // // // Example sending a request using RegisterUsageRequest. // req := client.RegisterUsageRequest(params) diff --git a/service/mediaconvert/api_enums.go b/service/mediaconvert/api_enums.go index 2842db4b0ed..23441eacd31 100644 --- a/service/mediaconvert/api_enums.go +++ b/service/mediaconvert/api_enums.go @@ -437,10 +437,12 @@ func (enum AudioDefaultSelection) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } -// Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output -// to follow the ISO 639 language code of the input. The language specified -// for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT -// is selected but there is no ISO 639 language code specified by the input. +// Specify which source for language code takes precedence for this audio track. +// When you choose Follow input (FOLLOW_INPUT), the service uses the language +// code from the input track if it's present. If there's no languge code on +// the input track, the service uses the code that you specify in the setting +// Language code (languageCode or customLanguageCode). When you choose Use configured +// (USE_CONFIGURED), the service uses the language code that you specify. type AudioLanguageCodeControl string // Enum values for AudioLanguageCodeControl @@ -1398,6 +1400,45 @@ func (enum DescribeEndpointsMode) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +// Use Dolby Vision Mode to choose how the service will handle Dolby Vision +// MaxCLL and MaxFALL properies. +type DolbyVisionLevel6Mode string + +// Enum values for DolbyVisionLevel6Mode +const ( + DolbyVisionLevel6ModePassthrough DolbyVisionLevel6Mode = "PASSTHROUGH" + DolbyVisionLevel6ModeRecalculate DolbyVisionLevel6Mode = "RECALCULATE" + DolbyVisionLevel6ModeSpecify DolbyVisionLevel6Mode = "SPECIFY" +) + +func (enum DolbyVisionLevel6Mode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DolbyVisionLevel6Mode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// In the current MediaConvert implementation, the Dolby Vision profile is always +// 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame +// interleaved data. +type DolbyVisionProfile string + +// Enum values for DolbyVisionProfile +const ( + DolbyVisionProfileProfile5 DolbyVisionProfile = "PROFILE_5" +) + +func (enum DolbyVisionProfile) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DolbyVisionProfile) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + // Applies only to 29.97 fps outputs. When this feature is enabled, the service // will use drop-frame timecode on outputs. If it is not possible to use drop-frame // timecode, the system will fall back to non-drop-frame. 
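The new Dolby Vision enums follow the same generated pattern as every other MediaConvert enum: a typed string plus MarshalValue/MarshalValueBuf for the JSON encoder. A small sketch of what that marshaling yields for the constants added above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	// MarshalValue simply returns the wire string for the constant.
	mode := mediaconvert.DolbyVisionLevel6ModeRecalculate
	s, _ := mode.MarshalValue()
	fmt.Println(s) // RECALCULATE

	profile := mediaconvert.DolbyVisionProfileProfile5
	s, _ = profile.MarshalValue()
	fmt.Println(s) // PROFILE_5
}
```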
This setting is enabled @@ -3099,14 +3140,15 @@ func (enum H265UnregisteredSeiTimecode) MarshalValueBuf(b []byte) ([]byte, error } // If the location of parameter set NAL units doesn't matter in your workflow, -// ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. -// For file MP4 outputs, choosing HVC1 can create video that doesn't work properly -// with some downstream systems and video players. Choose HVC1 to mark your -// output as HVC1. This makes your output compliant with the following specification: -// ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these -// outputs, the service stores parameter set NAL units in the sample headers -// but not in the samples directly. The service defaults to marking your output -// as HEV1. For these outputs, the service writes parameter set NAL units directly +// ignore this setting. Use this setting only with CMAF or DASH outputs, or +// with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose +// HVC1 to mark your output as HVC1. This makes your output compliant with the +// following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 +// 3rd Edition. For these outputs, the service stores parameter set NAL units +// in the sample headers but not in the samples directly. For MP4 outputs, when +// you choose HVC1, your output video might not work properly with some downstream +// systems and video players. The service defaults to marking your output as +// HEV1. For these outputs, the service writes parameter set NAL units directly // into the samples. type H265WriteMp4PackagingType string @@ -4454,6 +4496,71 @@ func (enum Mp4MoovPlacement) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +// Use this setting only in DASH output groups that include sidecar TTML or +// IMSC captions. You specify sidecar captions in a separate output from your +// audio and video. Choose Raw (RAW) for captions in a single XML file in a +// raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in +// XML format contained within fragmented MP4 files. This set of fragmented +// MP4 files is separate from your video and audio fragmented MP4 files. +type MpdCaptionContainerType string + +// Enum values for MpdCaptionContainerType +const ( + MpdCaptionContainerTypeRaw MpdCaptionContainerType = "RAW" + MpdCaptionContainerTypeFragmentedMp4 MpdCaptionContainerType = "FRAGMENTED_MP4" +) + +func (enum MpdCaptionContainerType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MpdCaptionContainerType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// Use this setting only when you specify SCTE-35 markers from ESAM. Choose +// INSERT to put SCTE-35 markers in this output at the insertion points that +// you specify in an ESAM XML document. Provide the document in the setting +// SCC XML (sccXml). +type MpdScte35Esam string + +// Enum values for MpdScte35Esam +const ( + MpdScte35EsamInsert MpdScte35Esam = "INSERT" + MpdScte35EsamNone MpdScte35Esam = "NONE" +) + +func (enum MpdScte35Esam) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MpdScte35Esam) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// Ignore this setting unless you have SCTE-35 markers in your input video file. +// Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear +// in your input to also appear in this output. 
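The Mpd* enums above are surfaced through the new MpdSettings container settings added later in this diff (via ContainerSettings.MpdSettings). A sketch of wiring them together for a DASH output with sidecar IMSC captions and SCTE-35 passthrough; the values are illustrative only, and the ContainerTypeMpd constant is assumed from the existing ContainerType enum:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	// Container settings for an output inside a DASH ISO output group.
	cs := &mediaconvert.ContainerSettings{
		Container: mediaconvert.ContainerTypeMpd, // constant name assumed from the ContainerType enum
		MpdSettings: &mediaconvert.MpdSettings{
			// Put sidecar TTML/IMSC captions into fragmented MP4 files.
			CaptionContainerType: mediaconvert.MpdCaptionContainerTypeFragmentedMp4,
			// Pass SCTE-35 markers from the input through to this output.
			Scte35Source: mediaconvert.MpdScte35SourcePassthrough,
			// Not using ESAM-driven insertion here.
			Scte35Esam: mediaconvert.MpdScte35EsamNone,
		},
	}
	fmt.Println(cs.String())
}
```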
Choose None (NONE) if you don't +// want those SCTE-35 markers in this output. +type MpdScte35Source string + +// Enum values for MpdScte35Source +const ( + MpdScte35SourcePassthrough MpdScte35Source = "PASSTHROUGH" + MpdScte35SourceNone MpdScte35Source = "NONE" +) + +func (enum MpdScte35Source) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MpdScte35Source) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual // quality. type Mpeg2AdaptiveQuantization string @@ -5270,6 +5377,26 @@ func (enum RespondToAfd) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +// Choose an Amazon S3 canned ACL for MediaConvert to apply to this output. +type S3ObjectCannedAcl string + +// Enum values for S3ObjectCannedAcl +const ( + S3ObjectCannedAclPublicRead S3ObjectCannedAcl = "PUBLIC_READ" + S3ObjectCannedAclAuthenticatedRead S3ObjectCannedAcl = "AUTHENTICATED_READ" + S3ObjectCannedAclBucketOwnerRead S3ObjectCannedAcl = "BUCKET_OWNER_READ" + S3ObjectCannedAclBucketOwnerFullControl S3ObjectCannedAcl = "BUCKET_OWNER_FULL_CONTROL" +) + +func (enum S3ObjectCannedAcl) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum S3ObjectCannedAcl) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + // Specify how you want your data keys managed. AWS uses data keys to encrypt // your content. AWS also encrypts the data keys themselves, using a customer // master key (CMK), and then stores the encrypted data keys alongside your @@ -5333,6 +5460,7 @@ type SccDestinationFramerate string const ( SccDestinationFramerateFramerate2397 SccDestinationFramerate = "FRAMERATE_23_97" SccDestinationFramerateFramerate24 SccDestinationFramerate = "FRAMERATE_24" + SccDestinationFramerateFramerate25 SccDestinationFramerate = "FRAMERATE_25" SccDestinationFramerateFramerate2997Dropframe SccDestinationFramerate = "FRAMERATE_29_97_DROPFRAME" SccDestinationFramerateFramerate2997NonDropframe SccDestinationFramerate = "FRAMERATE_29_97_NON_DROPFRAME" ) diff --git a/service/mediaconvert/api_types.go b/service/mediaconvert/api_types.go index 786a755292a..4cf14b13ee4 100644 --- a/service/mediaconvert/api_types.go +++ b/service/mediaconvert/api_types.go @@ -629,12 +629,16 @@ type AudioDescription struct { // * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` - // Specify the language for this audio output track, using the ISO 639-2 or - // ISO 639-3 three-letter language code. The language specified will be used - // when 'Follow Input Language Code' is not selected or when 'Follow Input Language - // Code' is selected but there is no ISO 639 language code specified by the - // input. - CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` + // Specify the language for this audio output track. The service puts this language + // code into your output audio track when you set Language code control (AudioLanguageCodeControl) + // to Use configured (USE_CONFIGURED). The service also uses your specified + // custom language code when you set Language code control (AudioLanguageCodeControl) + // to Follow input (FOLLOW_INPUT), but your input file doesn't specify a language + // code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. 
For streaming + // outputs, you can also use any other code in the full RFC-5646 specification. + // Streaming outputs are those that are in one of the following output groups: + // CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming. + CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Indicates the language of the audio output track. The ISO 639 language specified // in the 'Language Code' drop down will be used when 'Follow Input Language @@ -642,10 +646,12 @@ type AudioDescription struct { // there is no ISO 639 language code specified by the input. LanguageCode LanguageCode `locationName:"languageCode" type:"string" enum:"true"` - // Choosing FOLLOW_INPUT will cause the ISO 639 language code of the output - // to follow the ISO 639 language code of the input. The language specified - // for languageCode' will be used when USE_CONFIGURED is selected or when FOLLOW_INPUT - // is selected but there is no ISO 639 language code specified by the input. + // Specify which source for language code takes precedence for this audio track. + // When you choose Follow input (FOLLOW_INPUT), the service uses the language + // code from the input track if it's present. If there's no languge code on + // the input track, the service uses the code that you specify in the setting + // Language code (languageCode or customLanguageCode). When you choose Use configured + // (USE_CONFIGURED), the service uses the language code that you specify. LanguageCodeControl AudioLanguageCodeControl `locationName:"languageCodeControl" type:"string" enum:"true"` // Advanced audio remixing settings. @@ -666,9 +672,6 @@ func (s AudioDescription) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *AudioDescription) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "AudioDescription"} - if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3)) - } if s.AudioNormalizationSettings != nil { if err := s.AudioNormalizationSettings.Validate(); err != nil { invalidParams.AddNested("AudioNormalizationSettings", err.(aws.ErrInvalidParams)) @@ -1331,13 +1334,16 @@ type CaptionDescription struct { // each input. CaptionSelectorName *string `locationName:"captionSelectorName" min:"1" type:"string"` - // Indicates the language of the caption output track, using the ISO 639-2 or - // ISO 639-3 three-letter language code. For most captions output formats, the - // encoder puts this language information in the output captions metadata. If - // your output captions format is DVB-Sub or Burn in, the encoder uses this - // language information to choose the font language for rendering the captions - // text. - CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` + // Specify the language for this captions output track. For most captions output + // formats, the encoder puts this language information in the output captions + // metadata. If your output captions format is DVB-Sub or Burn in, the encoder + // uses this language information when automatically selecting the font script + // for rendering the captions text. For all outputs, you can use an ISO 639-2 + // or ISO 639-3 code. For streaming outputs, you can also use any other code + // in the full RFC-5646 specification. Streaming outputs are those that are + // in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft + // Smooth Streaming. 
+ CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Specific settings required by destination type. Note that burnin_destination_settings // are not available if the source of the caption data is Embedded or Teletext. @@ -1369,9 +1375,6 @@ func (s *CaptionDescription) Validate() error { if s.CaptionSelectorName != nil && len(*s.CaptionSelectorName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("CaptionSelectorName", 1)) } - if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3)) - } if s.DestinationSettings != nil { if err := s.DestinationSettings.Validate(); err != nil { invalidParams.AddNested("DestinationSettings", err.(aws.ErrInvalidParams)) @@ -1423,13 +1426,16 @@ func (s CaptionDescription) MarshalFields(e protocol.FieldEncoder) error { type CaptionDescriptionPreset struct { _ struct{} `type:"structure"` - // Indicates the language of the caption output track, using the ISO 639-2 or - // ISO 639-3 three-letter language code. For most captions output formats, the - // encoder puts this language information in the output captions metadata. If - // your output captions format is DVB-Sub or Burn in, the encoder uses this - // language information to choose the font language for rendering the captions - // text. - CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` + // Specify the language for this captions output track. For most captions output + // formats, the encoder puts this language information in the output captions + // metadata. If your output captions format is DVB-Sub or Burn in, the encoder + // uses this language information when automatically selecting the font script + // for rendering the captions text. For all outputs, you can use an ISO 639-2 + // or ISO 639-3 code. For streaming outputs, you can also use any other code + // in the full RFC-5646 specification. Streaming outputs are those that are + // in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft + // Smooth Streaming. + CustomLanguageCode *string `locationName:"customLanguageCode" type:"string"` // Specific settings required by destination type. Note that burnin_destination_settings // are not available if the source of the caption data is Embedded or Teletext. @@ -1458,9 +1464,6 @@ func (s CaptionDescriptionPreset) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionDescriptionPreset) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CaptionDescriptionPreset"} - if s.CustomLanguageCode != nil && len(*s.CustomLanguageCode) < 3 { - invalidParams.Add(aws.NewErrParamMinLen("CustomLanguageCode", 3)) - } if s.DestinationSettings != nil { if err := s.DestinationSettings.Validate(); err != nil { invalidParams.AddNested("DestinationSettings", err.(aws.ErrInvalidParams)) @@ -1859,6 +1862,67 @@ func (s ChannelMapping) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specify the details for each pair of HLS and DASH additional manifests that +// you want the service to generate for this CMAF output group. Each pair of +// manifests can reference a different subset of outputs in the group. +type CmafAdditionalManifest struct { + _ struct{} `type:"structure"` + + // Specify a name modifier that the service adds to the name of this manifest + // to make it different from the file names of the other main manifests in the + // output group. 
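With the min:"3" constraint dropped from customLanguageCode, client-side validation no longer rejects short RFC-5646 tags such as "en" on audio and captions tracks. A sketch of the relaxed usage; the selector name is a placeholder and the USE_CONFIGURED constant name is assumed from the SDK's usual enum naming:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	audio := mediaconvert.AudioDescription{
		// Two-character tags like "en" were previously rejected by the
		// client-side minimum-length-3 check that this change removes.
		CustomLanguageCode:  aws.String("en"),
		LanguageCodeControl: mediaconvert.AudioLanguageCodeControlUseConfigured, // constant name assumed
	}
	fmt.Println("audio validates:", audio.Validate() == nil)

	captions := mediaconvert.CaptionDescription{
		CaptionSelectorName: aws.String("Captions Selector 1"), // placeholder
		CustomLanguageCode:  aws.String("en"),
	}
	fmt.Println("captions validate:", captions.Validate() == nil)
}
```

Note that, per the updated doc comments, codes beyond ISO 639-2/639-3 are only meaningful for streaming output groups (CMAF, DASH ISO, Apple HLS, Microsoft Smooth Streaming).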
For example, say that the default main manifest for your HLS + // group is film-name.m3u8. If you enter "-no-premium" for this setting, then + // the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. + // For HLS output groups, specify a manifestNameModifier that is different from + // the nameModifier of the output. The service uses the output name modifier + // to create unique names for the individual variant manifests. + ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` + + // Specify the outputs that you want this additional top-level manifest to reference. + SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` +} + +// String returns the string representation +func (s CmafAdditionalManifest) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CmafAdditionalManifest) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CmafAdditionalManifest"} + if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CmafAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { + if s.ManifestNameModifier != nil { + v := *s.ManifestNameModifier + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SelectedOutputs != nil { + v := s.SelectedOutputs + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // Settings for CMAF encryption type CmafEncryptionSettings struct { _ struct{} `type:"structure"` @@ -1954,6 +2018,14 @@ func (s CmafEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error { type CmafGroupSettings struct { _ struct{} `type:"structure"` + // By default, the service creates one top-level .m3u8 HLS manifest and one + // top -level .mpd DASH manifest for each CMAF output group in your job. These + // default manifests reference every output in the output group. To create additional + // top-level manifests that reference a subset of the outputs in the output + // group, specify a list of them here. For each additional manifest that you + // specify, the service creates one HLS manifest and one DASH manifest. + AdditionalManifests []CmafAdditionalManifest `locationName:"additionalManifests" type:"list"` + // A partial URI prefix that will be put in the manifest file at the top level // BaseURL element. Can be used if streams are delivered from a different URL // than the manifest file. 
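The new additionalManifests list on the CMAF group settings (and the analogous DashAdditionalManifest, HlsAdditionalManifest, and MsSmoothAdditionalManifest types later in this diff) all share the same two-field shape. A sketch of requesting one extra HLS/DASH manifest pair that references only a subset of outputs; output names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	cmaf := mediaconvert.CmafGroupSettings{
		AdditionalManifests: []mediaconvert.CmafAdditionalManifest{
			{
				// Appended to the default manifest name, e.g. film-name-no-premium.m3u8 / .mpd.
				ManifestNameModifier: aws.String("-no-premium"),
				// Outputs in this group that the extra manifests should reference (placeholders).
				SelectedOutputs: []string{"sd-output", "audio-only-output"},
			},
		},
	}
	if err := cmaf.Validate(); err != nil {
		fmt.Println("validation error:", err)
		return
	}
	fmt.Println(cmaf.String())
}
```

As the updated Validate methods in this diff show, each group's validation now walks AdditionalManifests, so an empty manifestNameModifier is rejected before the request is sent.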
@@ -2061,6 +2133,13 @@ func (s *CmafGroupSettings) Validate() error { if s.SegmentLength != nil && *s.SegmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("SegmentLength", 1)) } + if s.AdditionalManifests != nil { + for i, v := range s.AdditionalManifests { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) + } + } + } if s.Encryption != nil { if err := s.Encryption.Validate(); err != nil { invalidParams.AddNested("Encryption", err.(aws.ErrInvalidParams)) @@ -2075,6 +2154,18 @@ func (s *CmafGroupSettings) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s CmafGroupSettings) MarshalFields(e protocol.FieldEncoder) error { + if s.AdditionalManifests != nil { + v := s.AdditionalManifests + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if s.BaseUrl != nil { v := *s.BaseUrl @@ -2318,6 +2409,9 @@ type ContainerSettings struct { // Settings for MP4 container. You can create audio-only AAC outputs with this // container. Mp4Settings *Mp4Settings `locationName:"mp4Settings" type:"structure"` + + // Settings for MP4 segments in DASH + MpdSettings *MpdSettings `locationName:"mpdSettings" type:"structure"` } // String returns the string representation @@ -2383,6 +2477,70 @@ func (s ContainerSettings) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "mp4Settings", v, metadata) } + if s.MpdSettings != nil { + v := s.MpdSettings + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "mpdSettings", v, metadata) + } + return nil +} + +// Specify the details for each additional DASH manifest that you want the service +// to generate for this output group. Each manifest can reference a different +// subset of outputs in the group. +type DashAdditionalManifest struct { + _ struct{} `type:"structure"` + + // Specify a name modifier that the service adds to the name of this manifest + // to make it different from the file names of the other main manifests in the + // output group. For example, say that the default main manifest for your DASH + // group is film-name.mpd. If you enter "-no-premium" for this setting, then + // the file name the service generates for this top-level manifest is film-name-no-premium.mpd. + ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` + + // Specify the outputs that you want this additional top-level manifest to reference. + SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` +} + +// String returns the string representation +func (s DashAdditionalManifest) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DashAdditionalManifest) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DashAdditionalManifest"} + if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DashAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { + if s.ManifestNameModifier != nil { + v := *s.ManifestNameModifier + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SelectedOutputs != nil { + v := s.SelectedOutputs + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } return nil } @@ -2431,6 +2589,12 @@ func (s DashIsoEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error type DashIsoGroupSettings struct { _ struct{} `type:"structure"` + // By default, the service creates one .mpd DASH manifest for each DASH ISO + // output group in your job. This default manifest references every output in + // the output group. To create additional DASH manifests that reference a subset + // of the outputs in the output group, specify a list of them here. + AdditionalManifests []DashAdditionalManifest `locationName:"additionalManifests" type:"list"` + // A partial URI prefix that will be put in the manifest (.mpd) file at the // top level BaseURL element. Can be used if streams are delivered from a different // URL than the manifest file. @@ -2509,6 +2673,13 @@ func (s *DashIsoGroupSettings) Validate() error { if s.SegmentLength != nil && *s.SegmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("SegmentLength", 1)) } + if s.AdditionalManifests != nil { + for i, v := range s.AdditionalManifests { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2518,6 +2689,18 @@ func (s *DashIsoGroupSettings) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s DashIsoGroupSettings) MarshalFields(e protocol.FieldEncoder) error { + if s.AdditionalManifests != nil { + v := s.AdditionalManifests + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if s.BaseUrl != nil { v := *s.BaseUrl @@ -2668,6 +2851,88 @@ func (s DestinationSettings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Settings for Dolby Vision +type DolbyVision struct { + _ struct{} `type:"structure"` + + // Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override + // the MaxCLL and MaxFALL values in your input with new values. + L6Metadata *DolbyVisionLevel6Metadata `locationName:"l6Metadata" type:"structure"` + + // Use Dolby Vision Mode to choose how the service will handle Dolby Vision + // MaxCLL and MaxFALL properies. + L6Mode DolbyVisionLevel6Mode `locationName:"l6Mode" type:"string" enum:"true"` + + // In the current MediaConvert implementation, the Dolby Vision profile is always + // 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame + // interleaved data. + Profile DolbyVisionProfile `locationName:"profile" type:"string" enum:"true"` +} + +// String returns the string representation +func (s DolbyVision) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DolbyVision) MarshalFields(e protocol.FieldEncoder) error { + if s.L6Metadata != nil { + v := s.L6Metadata + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "l6Metadata", v, metadata) + } + if len(s.L6Mode) > 0 { + v := s.L6Mode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "l6Mode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Profile) > 0 { + v := s.Profile + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "profile", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Use these settings when you set DolbyVisionLevel6Mode to SPECIFY to override +// the MaxCLL and MaxFALL values in your input with new values. +type DolbyVisionLevel6Metadata struct { + _ struct{} `type:"structure"` + + // Maximum Content Light Level. Static HDR metadata that corresponds to the + // brightest pixel in the entire stream. Measured in nits. + MaxCll *int64 `locationName:"maxCll" type:"integer"` + + // Maximum Frame-Average Light Level. Static HDR metadata that corresponds to + // the highest frame-average brightness in the entire stream. Measured in nits. + MaxFall *int64 `locationName:"maxFall" type:"integer"` +} + +// String returns the string representation +func (s DolbyVisionLevel6Metadata) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DolbyVisionLevel6Metadata) MarshalFields(e protocol.FieldEncoder) error { + if s.MaxCll != nil { + v := *s.MaxCll + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maxCll", protocol.Int64Value(v), metadata) + } + if s.MaxFall != nil { + v := *s.MaxFall + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "maxFall", protocol.Int64Value(v), metadata) + } + return nil +} + // Inserts DVB Network Information Table (NIT) at the specified table repetition // interval. type DvbNitSettings struct { @@ -4865,14 +5130,15 @@ type H265Settings struct { UnregisteredSeiTimecode H265UnregisteredSeiTimecode `locationName:"unregisteredSeiTimecode" type:"string" enum:"true"` // If the location of parameter set NAL units doesn't matter in your workflow, - // ignore this setting. Use this setting in your CMAF, DASH, or file MP4 output. - // For file MP4 outputs, choosing HVC1 can create video that doesn't work properly - // with some downstream systems and video players. Choose HVC1 to mark your - // output as HVC1. This makes your output compliant with the following specification: - // ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 3rd Edition. For these - // outputs, the service stores parameter set NAL units in the sample headers - // but not in the samples directly. The service defaults to marking your output - // as HEV1. For these outputs, the service writes parameter set NAL units directly + // ignore this setting. Use this setting only with CMAF or DASH outputs, or + // with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose + // HVC1 to mark your output as HVC1. This makes your output compliant with the + // following specification: ISO IECJTC1 SC29 N13798 Text ISO/IEC FDIS 14496-15 + // 3rd Edition. For these outputs, the service stores parameter set NAL units + // in the sample headers but not in the samples directly. For MP4 outputs, when + // you choose HVC1, your output video might not work properly with some downstream + // systems and video players. The service defaults to marking your output as + // HEV1. 
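The Dolby Vision structures above plug into video preprocessing through the new VideoPreprocessor.DolbyVision field added further down in this file. A sketch of overriding the Level 6 MaxCLL/MaxFALL metadata, with illustrative nit values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/mediaconvert"
)

func main() {
	pre := mediaconvert.VideoPreprocessor{
		DolbyVision: &mediaconvert.DolbyVision{
			// Inputs must carry Dolby Vision profile 5 frame-interleaved data.
			Profile: mediaconvert.DolbyVisionProfileProfile5,
			// SPECIFY tells the service to replace MaxCLL/MaxFALL with the values below.
			L6Mode: mediaconvert.DolbyVisionLevel6ModeSpecify,
			L6Metadata: &mediaconvert.DolbyVisionLevel6Metadata{
				MaxCll:  aws.Int64(1000), // illustrative values, in nits
				MaxFall: aws.Int64(400),
			},
		},
	}
	fmt.Println(pre.String())
}
```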
For these outputs, the service writes parameter set NAL units directly // into the samples. WriteMp4PackagingType H265WriteMp4PackagingType `locationName:"writeMp4PackagingType" type:"string" enum:"true"` } @@ -5309,6 +5575,67 @@ func (s Hdr10Metadata) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specify the details for each additional HLS manifest that you want the service +// to generate for this output group. Each manifest can reference a different +// subset of outputs in the group. +type HlsAdditionalManifest struct { + _ struct{} `type:"structure"` + + // Specify a name modifier that the service adds to the name of this manifest + // to make it different from the file names of the other main manifests in the + // output group. For example, say that the default main manifest for your HLS + // group is film-name.m3u8. If you enter "-no-premium" for this setting, then + // the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. + // For HLS output groups, specify a manifestNameModifier that is different from + // the nameModifier of the output. The service uses the output name modifier + // to create unique names for the individual variant manifests. + ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` + + // Specify the outputs that you want this additional top-level manifest to reference. + SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` +} + +// String returns the string representation +func (s HlsAdditionalManifest) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HlsAdditionalManifest) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "HlsAdditionalManifest"} + if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s HlsAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { + if s.ManifestNameModifier != nil { + v := *s.ManifestNameModifier + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SelectedOutputs != nil { + v := s.SelectedOutputs + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // Caption Language Mapping type HlsCaptionLanguageMapping struct { _ struct{} `type:"structure"` @@ -5316,7 +5643,7 @@ type HlsCaptionLanguageMapping struct { // Caption channel. CaptionChannel *int64 `locationName:"captionChannel" type:"integer"` - // Specify the language for this caption channel, using the ISO 639-2 or ISO + // Specify the language for this captions channel, using the ISO 639-2 or ISO // 639-3 three-letter language code CustomLanguageCode *string `locationName:"customLanguageCode" min:"3" type:"string"` @@ -5487,6 +5814,12 @@ type HlsGroupSettings struct { // themselves. AdMarkers []HlsAdMarkers `locationName:"adMarkers" type:"list"` + // By default, the service creates one top-level .m3u8 HLS manifest for each + // HLS output group in your job. 
This default manifest references every output + // in the output group. To create additional top-level manifests that reference + // a subset of the outputs in the output group, specify a list of them here. + AdditionalManifests []HlsAdditionalManifest `locationName:"additionalManifests" type:"list"` + // A partial URI prefix that will be prepended to each output in the media .m3u8 // file. Can be used if base manifest is delivered from a different URL than // the main .m3u8 file. @@ -5616,6 +5949,13 @@ func (s *HlsGroupSettings) Validate() error { if s.TimestampDeltaMilliseconds != nil && *s.TimestampDeltaMilliseconds < -2.147483648e+09 { invalidParams.Add(aws.NewErrParamMinValue("TimestampDeltaMilliseconds", -2.147483648e+09)) } + if s.AdditionalManifests != nil { + for i, v := range s.AdditionalManifests { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) + } + } + } if s.CaptionLanguageMappings != nil { for i, v := range s.CaptionLanguageMappings { if err := v.Validate(); err != nil { @@ -5648,6 +5988,18 @@ func (s HlsGroupSettings) MarshalFields(e protocol.FieldEncoder) error { } ls0.End() + } + if s.AdditionalManifests != nil { + v := s.AdditionalManifests + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + } if s.BaseUrl != nil { v := *s.BaseUrl @@ -8831,6 +9183,59 @@ func (s Mp4Settings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Settings for MP4 segments in DASH +type MpdSettings struct { + _ struct{} `type:"structure"` + + // Use this setting only in DASH output groups that include sidecar TTML or + // IMSC captions. You specify sidecar captions in a separate output from your + // audio and video. Choose Raw (RAW) for captions in a single XML file in a + // raw container. Choose Fragmented MPEG-4 (FRAGMENTED_MP4) for captions in + // XML format contained within fragmented MP4 files. This set of fragmented + // MP4 files is separate from your video and audio fragmented MP4 files. + CaptionContainerType MpdCaptionContainerType `locationName:"captionContainerType" type:"string" enum:"true"` + + // Use this setting only when you specify SCTE-35 markers from ESAM. Choose + // INSERT to put SCTE-35 markers in this output at the insertion points that + // you specify in an ESAM XML document. Provide the document in the setting + // SCC XML (sccXml). + Scte35Esam MpdScte35Esam `locationName:"scte35Esam" type:"string" enum:"true"` + + // Ignore this setting unless you have SCTE-35 markers in your input video file. + // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear + // in your input to also appear in this output. Choose None (NONE) if you don't + // want those SCTE-35 markers in this output. + Scte35Source MpdScte35Source `locationName:"scte35Source" type:"string" enum:"true"` +} + +// String returns the string representation +func (s MpdSettings) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s MpdSettings) MarshalFields(e protocol.FieldEncoder) error { + if len(s.CaptionContainerType) > 0 { + v := s.CaptionContainerType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "captionContainerType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Scte35Esam) > 0 { + v := s.Scte35Esam + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "scte35Esam", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Scte35Source) > 0 { + v := s.Scte35Source + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "scte35Source", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to // the value MPEG2. type Mpeg2Settings struct { @@ -9209,6 +9614,64 @@ func (s Mpeg2Settings) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specify the details for each additional Microsoft Smooth Streaming manifest +// that you want the service to generate for this output group. Each manifest +// can reference a different subset of outputs in the group. +type MsSmoothAdditionalManifest struct { + _ struct{} `type:"structure"` + + // Specify a name modifier that the service adds to the name of this manifest + // to make it different from the file names of the other main manifests in the + // output group. For example, say that the default main manifest for your Microsoft + // Smooth group is film-name.ismv. If you enter "-no-premium" for this setting, + // then the file name the service generates for this top-level manifest is film-name-no-premium.ismv. + ManifestNameModifier *string `locationName:"manifestNameModifier" min:"1" type:"string"` + + // Specify the outputs that you want this additional top-level manifest to reference. + SelectedOutputs []string `locationName:"selectedOutputs" type:"list"` +} + +// String returns the string representation +func (s MsSmoothAdditionalManifest) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MsSmoothAdditionalManifest) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MsSmoothAdditionalManifest"} + if s.ManifestNameModifier != nil && len(*s.ManifestNameModifier) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ManifestNameModifier", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MsSmoothAdditionalManifest) MarshalFields(e protocol.FieldEncoder) error { + if s.ManifestNameModifier != nil { + v := *s.ManifestNameModifier + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "manifestNameModifier", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SelectedOutputs != nil { + v := s.SelectedOutputs + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "selectedOutputs", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + // If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify // the value SpekeKeyProvider. 
type MsSmoothEncryptionSettings struct { @@ -9241,6 +9704,13 @@ func (s MsSmoothEncryptionSettings) MarshalFields(e protocol.FieldEncoder) error type MsSmoothGroupSettings struct { _ struct{} `type:"structure"` + // By default, the service creates one .ism Microsoft Smooth Streaming manifest + // for each Microsoft Smooth Streaming output group in your job. This default + // manifest references every output in the output group. To create additional + // manifests that reference a subset of the outputs in the output group, specify + // a list of them here. + AdditionalManifests []MsSmoothAdditionalManifest `locationName:"additionalManifests" type:"list"` + // COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across // a Microsoft Smooth output group into a single audio stream. AudioDeduplication MsSmoothAudioDeduplication `locationName:"audioDeduplication" type:"string" enum:"true"` @@ -9280,6 +9750,13 @@ func (s *MsSmoothGroupSettings) Validate() error { if s.FragmentLength != nil && *s.FragmentLength < 1 { invalidParams.Add(aws.NewErrParamMinValue("FragmentLength", 1)) } + if s.AdditionalManifests != nil { + for i, v := range s.AdditionalManifests { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalManifests", i), err.(aws.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9289,6 +9766,18 @@ func (s *MsSmoothGroupSettings) Validate() error { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s MsSmoothGroupSettings) MarshalFields(e protocol.FieldEncoder) error { + if s.AdditionalManifests != nil { + v := s.AdditionalManifests + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "additionalManifests", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } if len(s.AudioDeduplication) > 0 { v := s.AudioDeduplication @@ -10890,10 +11379,41 @@ func (s ResourceTags) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Optional. Have MediaConvert automatically apply Amazon S3 access control +// for the outputs in this output group. When you don't use this setting, S3 +// automatically applies the default access control list PRIVATE. +type S3DestinationAccessControl struct { + _ struct{} `type:"structure"` + + // Choose an Amazon S3 canned ACL for MediaConvert to apply to this output. + CannedAcl S3ObjectCannedAcl `locationName:"cannedAcl" type:"string" enum:"true"` +} + +// String returns the string representation +func (s S3DestinationAccessControl) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s S3DestinationAccessControl) MarshalFields(e protocol.FieldEncoder) error { + if len(s.CannedAcl) > 0 { + v := s.CannedAcl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "cannedAcl", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + // Settings associated with S3 destination type S3DestinationSettings struct { _ struct{} `type:"structure"` + // Optional. Have MediaConvert automatically apply Amazon S3 access control + // for the outputs in this output group. When you don't use this setting, S3 + // automatically applies the default access control list PRIVATE. + AccessControl *S3DestinationAccessControl `locationName:"accessControl" type:"structure"` + // Settings for how your job outputs are encrypted as they are uploaded to Amazon // S3. 
Encryption *S3EncryptionSettings `locationName:"encryption" type:"structure"` @@ -10906,6 +11426,12 @@ func (s S3DestinationSettings) String() string { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s S3DestinationSettings) MarshalFields(e protocol.FieldEncoder) error { + if s.AccessControl != nil { + v := s.AccessControl + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "accessControl", v, metadata) + } if s.Encryption != nil { v := s.Encryption @@ -11990,6 +12516,9 @@ type VideoPreprocessor struct { // picture. Deinterlacer *Deinterlacer `locationName:"deinterlacer" type:"structure"` + // Enable Dolby Vision feature to produce Dolby Vision compatible video output. + DolbyVision *DolbyVision `locationName:"dolbyVision" type:"structure"` + // Enable the Image inserter (ImageInserter) feature to include a graphic overlay // on your video. Enable or disable this feature for each output individually. // This setting is disabled by default. @@ -12054,6 +12583,12 @@ func (s VideoPreprocessor) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "deinterlacer", v, metadata) } + if s.DolbyVision != nil { + v := s.DolbyVision + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "dolbyVision", v, metadata) + } if s.ImageInserter != nil { v := s.ImageInserter diff --git a/service/mediastore/api_op_CreateContainer.go b/service/mediastore/api_op_CreateContainer.go index 57d389e5a2b..aca5bad6e2d 100644 --- a/service/mediastore/api_op_CreateContainer.go +++ b/service/mediastore/api_op_CreateContainer.go @@ -26,7 +26,7 @@ type CreateContainerInput struct { // and the tag value represents a specific value within that category (such // as "test," "development," or "production"). You can add up to 50 tags to // each container. For more information about tagging, including naming and - // usage conventions, see Tagging Resources in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). + // usage conventions, see Tagging Resources in MediaStore (https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html). Tags []Tag `type:"list"` } diff --git a/service/mediastore/api_op_TagResource.go b/service/mediastore/api_op_TagResource.go index 3b3594f9d58..d73de052b4f 100644 --- a/service/mediastore/api_op_TagResource.go +++ b/service/mediastore/api_op_TagResource.go @@ -82,7 +82,7 @@ const opTagResource = "TagResource" // might be "customer" and the tag value might be "companyA." You can specify // one or more tags to add to each container. You can add up to 50 tags to each // container. For more information about tagging, including naming and usage -// conventions, see Tagging Resources in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). +// conventions, see Tagging Resources in MediaStore (https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html). // // // Example sending a request using TagResourceRequest. // req := client.TagResourceRequest(params) diff --git a/service/mediastore/api_types.go b/service/mediastore/api_types.go index 756058d34e3..1d67941e362 100644 --- a/service/mediastore/api_types.go +++ b/service/mediastore/api_types.go @@ -137,13 +137,15 @@ func (s *CorsRule) Validate() error { // a specific value within that category (such as "test," "development," or // "production"). You can add up to 50 tags to each container. 
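On the MediaStore side, Tag.Key is now marked required, so a tag without a key fails client-side validation before the request is sent. A sketch of creating a container with tags under the new constraint; the container name and tag values are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/mediastore"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("unable to load SDK config: %v", err)
	}
	svc := mediastore.New(cfg)

	req := svc.CreateContainerRequest(&mediastore.CreateContainerInput{
		ContainerName: aws.String("movies"), // placeholder
		Tags: []mediastore.Tag{
			// Key is now required; omitting it fails Validate() locally.
			{Key: aws.String("customer"), Value: aws.String("companyA")},
			{Key: aws.String("priority"), Value: aws.String("High")},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatalf("CreateContainer failed: %v", err)
	}
}
```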
For more information // about tagging, including naming and usage conventions, see Tagging Resources -// in MediaStore (https://aws.amazon.com/documentation/mediastore/tagging). +// in MediaStore (https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html). type Tag struct { _ struct{} `type:"structure"` // Part of the key:value pair that defines a tag. You can use a tag key to describe // a category of information, such as "customer." Tag keys are case-sensitive. - Key *string `min:"1" type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // Part of the key:value pair that defines a tag. You can use a tag value to // describe a specific value within a category, such as "companyA" or "companyB." @@ -159,6 +161,10 @@ func (s Tag) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Tag) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "Tag"} + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Key", 1)) } diff --git a/service/migrationhub/api_doc.go b/service/migrationhub/api_doc.go index 80121638f0f..2ac732c462a 100644 --- a/service/migrationhub/api_doc.go +++ b/service/migrationhub/api_doc.go @@ -7,6 +7,10 @@ // status and integrate your resource-specific migration tool by providing a // programmatic interface to Migration Hub. // +// Remember that you must set your AWS Migration Hub home region before you +// call any of these APIs, or a HomeRegionNotSetException error will be returned. +// Also, you must make the API calls while in your home region. +// // See https://docs.aws.amazon.com/goto/WebAPI/AWSMigrationHub-2017-05-31 for more information on this service. // // See migrationhub package documentation for more information. diff --git a/service/migrationhub/api_errors.go b/service/migrationhub/api_errors.go index 6eac5c2709f..dbc0a69d7df 100644 --- a/service/migrationhub/api_errors.go +++ b/service/migrationhub/api_errors.go @@ -17,11 +17,17 @@ const ( // flag is set to "true". ErrCodeDryRunOperation = "DryRunOperation" + // ErrCodeHomeRegionNotSetException for service response error code + // "HomeRegionNotSetException". + // + // The home region is not set. Set the home region to continue. + ErrCodeHomeRegionNotSetException = "HomeRegionNotSetException" + // ErrCodeInternalServerError for service response error code // "InternalServerError". // - // Exception raised when there is an internal, configuration, or dependency - // error encountered. + // Exception raised when an internal, configuration, or dependency error is + // encountered. ErrCodeInternalServerError = "InternalServerError" // ErrCodeInvalidInputException for service response error code @@ -34,17 +40,18 @@ const ( // ErrCodePolicyErrorException for service response error code // "PolicyErrorException". // - // Exception raised when there are problems accessing ADS (Application Discovery - // Service); most likely due to a misconfigured policy or the migrationhub-discovery - // role is missing or not configured correctly. + // Exception raised when there are problems accessing Application Discovery + // Service (Application Discovery Service); most likely due to a misconfigured + // policy or the migrationhub-discovery role is missing or not configured correctly. 
ErrCodePolicyErrorException = "PolicyErrorException" // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // Exception raised when the request references a resource (ADS configuration, - // update stream, migration task, etc.) that does not exist in ADS (Application - // Discovery Service) or in Migration Hub's repository. + // Exception raised when the request references a resource (Application Discovery + // Service configuration, update stream, migration task, etc.) that does not + // exist in Application Discovery Service (Application Discovery Service) or + // in Migration Hub's repository. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeServiceUnavailableException for service response error code diff --git a/service/migrationhub/api_op_AssociateCreatedArtifact.go b/service/migrationhub/api_op_AssociateCreatedArtifact.go index 3ec4f1c6746..e074c8dde9f 100644 --- a/service/migrationhub/api_op_AssociateCreatedArtifact.go +++ b/service/migrationhub/api_op_AssociateCreatedArtifact.go @@ -22,7 +22,8 @@ type AssociateCreatedArtifactInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_AssociateDiscoveredResource.go b/service/migrationhub/api_op_AssociateDiscoveredResource.go index 97a18619642..31fe7ff4f55 100644 --- a/service/migrationhub/api_op_AssociateDiscoveredResource.go +++ b/service/migrationhub/api_op_AssociateDiscoveredResource.go @@ -21,7 +21,8 @@ type AssociateDiscoveredResourceInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // The identifier given to the MigrationTask. + // The identifier given to the MigrationTask. Do not store personal data in + // this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` @@ -84,8 +85,8 @@ const opAssociateDiscoveredResource = "AssociateDiscoveredResource" // AssociateDiscoveredResourceRequest returns a request value for making API operation for // AWS Migration Hub. // -// Associates a discovered resource ID from Application Discovery Service (ADS) -// with a migration task. +// Associates a discovered resource ID from Application Discovery Service with +// a migration task. // // // Example sending a request using AssociateDiscoveredResourceRequest. // req := client.AssociateDiscoveredResourceRequest(params) diff --git a/service/migrationhub/api_op_CreateProgressUpdateStream.go b/service/migrationhub/api_op_CreateProgressUpdateStream.go index 2db5069a91f..6d06a049bb7 100644 --- a/service/migrationhub/api_op_CreateProgressUpdateStream.go +++ b/service/migrationhub/api_op_CreateProgressUpdateStream.go @@ -16,7 +16,8 @@ type CreateProgressUpdateStreamInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // The name of the ProgressUpdateStream. + // The name of the ProgressUpdateStream. Do not store personal data in this + // field. 
// // ProgressUpdateStreamName is a required field ProgressUpdateStreamName *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_DeleteProgressUpdateStream.go b/service/migrationhub/api_op_DeleteProgressUpdateStream.go index 087710884f9..b873bd11d85 100644 --- a/service/migrationhub/api_op_DeleteProgressUpdateStream.go +++ b/service/migrationhub/api_op_DeleteProgressUpdateStream.go @@ -16,7 +16,8 @@ type DeleteProgressUpdateStreamInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // The name of the ProgressUpdateStream. + // The name of the ProgressUpdateStream. Do not store personal data in this + // field. // // ProgressUpdateStreamName is a required field ProgressUpdateStreamName *string `min:"1" type:"string" required:"true"` @@ -73,7 +74,7 @@ const opDeleteProgressUpdateStream = "DeleteProgressUpdateStream" // ListProgressUpdateStreams call. // // * CreateProgressUpdateStream, ImportMigrationTask, NotifyMigrationTaskState, -// and all Associate[*] APIs realted to the tasks belonging to the stream +// and all Associate[*] APIs related to the tasks belonging to the stream // will throw "InvalidInputException" if the stream of the same name is in // the process of being deleted. // diff --git a/service/migrationhub/api_op_DescribeApplicationState.go b/service/migrationhub/api_op_DescribeApplicationState.go index 4cc75195b24..ea5e957610a 100644 --- a/service/migrationhub/api_op_DescribeApplicationState.go +++ b/service/migrationhub/api_op_DescribeApplicationState.go @@ -13,7 +13,8 @@ import ( type DescribeApplicationStateInput struct { _ struct{} `type:"structure"` - // The configurationId in ADS that uniquely identifies the grouped application. + // The configurationId in Application Discovery Service that uniquely identifies + // the grouped application. // // ApplicationId is a required field ApplicationId *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_DescribeMigrationTask.go b/service/migrationhub/api_op_DescribeMigrationTask.go index cb9b7fb9775..4a8f57177b5 100644 --- a/service/migrationhub/api_op_DescribeMigrationTask.go +++ b/service/migrationhub/api_op_DescribeMigrationTask.go @@ -12,7 +12,8 @@ import ( type DescribeMigrationTaskInput struct { _ struct{} `type:"structure"` - // The identifier given to the MigrationTask. + // The identifier given to the MigrationTask. Do not store personal data in + // this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_DisassociateCreatedArtifact.go b/service/migrationhub/api_op_DisassociateCreatedArtifact.go index 50c2578e62c..7d1cbd9b86d 100644 --- a/service/migrationhub/api_op_DisassociateCreatedArtifact.go +++ b/service/migrationhub/api_op_DisassociateCreatedArtifact.go @@ -23,7 +23,7 @@ type DisassociateCreatedArtifactInput struct { DryRun *bool `type:"boolean"` // Unique identifier that references the migration task to be disassociated - // with the artifact. + // with the artifact. Do not store personal data in this field. 
// // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_DisassociateDiscoveredResource.go b/service/migrationhub/api_op_DisassociateDiscoveredResource.go index 5c57fd4faf8..5db934f4c86 100644 --- a/service/migrationhub/api_op_DisassociateDiscoveredResource.go +++ b/service/migrationhub/api_op_DisassociateDiscoveredResource.go @@ -12,7 +12,7 @@ import ( type DisassociateDiscoveredResourceInput struct { _ struct{} `type:"structure"` - // ConfigurationId of the ADS resource to be disassociated. + // ConfigurationId of the Application Discovery Service resource to be disassociated. // // ConfigurationId is a required field ConfigurationId *string `min:"1" type:"string" required:"true"` @@ -21,7 +21,8 @@ type DisassociateDiscoveredResourceInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // The identifier given to the MigrationTask. + // The identifier given to the MigrationTask. Do not store personal data in + // this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` @@ -82,8 +83,8 @@ const opDisassociateDiscoveredResource = "DisassociateDiscoveredResource" // DisassociateDiscoveredResourceRequest returns a request value for making API operation for // AWS Migration Hub. // -// Disassociate an Application Discovery Service (ADS) discovered resource from -// a migration task. +// Disassociate an Application Discovery Service discovered resource from a +// migration task. // // // Example sending a request using DisassociateDiscoveredResourceRequest. // req := client.DisassociateDiscoveredResourceRequest(params) diff --git a/service/migrationhub/api_op_ImportMigrationTask.go b/service/migrationhub/api_op_ImportMigrationTask.go index 89bd104bfe2..efc6c1437c0 100644 --- a/service/migrationhub/api_op_ImportMigrationTask.go +++ b/service/migrationhub/api_op_ImportMigrationTask.go @@ -16,12 +16,13 @@ type ImportMigrationTaskInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` - // The name of the ProgressUpdateStream. + // The name of the ProgressUpdateStream. > // // ProgressUpdateStream is a required field ProgressUpdateStream *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_ListCreatedArtifacts.go b/service/migrationhub/api_op_ListCreatedArtifacts.go index 21ee6536419..7a0060d09eb 100644 --- a/service/migrationhub/api_op_ListCreatedArtifacts.go +++ b/service/migrationhub/api_op_ListCreatedArtifacts.go @@ -15,7 +15,8 @@ type ListCreatedArtifactsInput struct { // Maximum number of results to be returned per page. MaxResults *int64 `min:"1" type:"integer"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. 
// // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` @@ -108,6 +109,12 @@ func (c *Client) ListCreatedArtifactsRequest(input *ListCreatedArtifactsInput) L Name: opListCreatedArtifacts, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -142,6 +149,53 @@ func (r ListCreatedArtifactsRequest) Send(ctx context.Context) (*ListCreatedArti return resp, nil } +// NewListCreatedArtifactsRequestPaginator returns a paginator for ListCreatedArtifacts. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListCreatedArtifactsRequest(input) +// p := migrationhub.NewListCreatedArtifactsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListCreatedArtifactsPaginator(req ListCreatedArtifactsRequest) ListCreatedArtifactsPaginator { + return ListCreatedArtifactsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListCreatedArtifactsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListCreatedArtifactsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListCreatedArtifactsPaginator struct { + aws.Pager +} + +func (p *ListCreatedArtifactsPaginator) CurrentPage() *ListCreatedArtifactsOutput { + return p.Pager.CurrentPage().(*ListCreatedArtifactsOutput) +} + // ListCreatedArtifactsResponse is the response type for the // ListCreatedArtifacts API operation. type ListCreatedArtifactsResponse struct { diff --git a/service/migrationhub/api_op_ListDiscoveredResources.go b/service/migrationhub/api_op_ListDiscoveredResources.go index 05fdfaf4ad1..6c9a2177e0a 100644 --- a/service/migrationhub/api_op_ListDiscoveredResources.go +++ b/service/migrationhub/api_op_ListDiscoveredResources.go @@ -15,7 +15,7 @@ type ListDiscoveredResourcesInput struct { // The maximum number of results returned per page. MaxResults *int64 `min:"1" type:"integer"` - // The name of the MigrationTask. + // The name of the MigrationTask. Do not store personal data in this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` @@ -99,6 +99,12 @@ func (c *Client) ListDiscoveredResourcesRequest(input *ListDiscoveredResourcesIn Name: opListDiscoveredResources, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -133,6 +139,53 @@ func (r ListDiscoveredResourcesRequest) Send(ctx context.Context) (*ListDiscover return resp, nil } +// NewListDiscoveredResourcesRequestPaginator returns a paginator for ListDiscoveredResources. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. 
Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDiscoveredResourcesRequest(input) +// p := migrationhub.NewListDiscoveredResourcesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDiscoveredResourcesPaginator(req ListDiscoveredResourcesRequest) ListDiscoveredResourcesPaginator { + return ListDiscoveredResourcesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDiscoveredResourcesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDiscoveredResourcesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDiscoveredResourcesPaginator struct { + aws.Pager +} + +func (p *ListDiscoveredResourcesPaginator) CurrentPage() *ListDiscoveredResourcesOutput { + return p.Pager.CurrentPage().(*ListDiscoveredResourcesOutput) +} + // ListDiscoveredResourcesResponse is the response type for the // ListDiscoveredResources API operation. type ListDiscoveredResourcesResponse struct { diff --git a/service/migrationhub/api_op_ListMigrationTasks.go b/service/migrationhub/api_op_ListMigrationTasks.go index 5d0136c0544..afb27d88e4a 100644 --- a/service/migrationhub/api_op_ListMigrationTasks.go +++ b/service/migrationhub/api_op_ListMigrationTasks.go @@ -90,6 +90,12 @@ func (c *Client) ListMigrationTasksRequest(input *ListMigrationTasksInput) ListM Name: opListMigrationTasks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -124,6 +130,53 @@ func (r ListMigrationTasksRequest) Send(ctx context.Context) (*ListMigrationTask return resp, nil } +// NewListMigrationTasksRequestPaginator returns a paginator for ListMigrationTasks. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListMigrationTasksRequest(input) +// p := migrationhub.NewListMigrationTasksRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListMigrationTasksPaginator(req ListMigrationTasksRequest) ListMigrationTasksPaginator { + return ListMigrationTasksPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListMigrationTasksInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListMigrationTasksPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
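// A minimal usage sketch for the list paginators this change adds to the Migration
// Hub client, assuming the NewListMigrationTasksPaginator constructor defined above
// and the aws/external config loader referenced elsewhere in this change; the empty
// input and the logging are illustrative only, not a definitive integration.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhub"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := migrationhub.New(cfg)

	// Build the request once; the paginator copies the input for every page it fetches.
	req := svc.ListMigrationTasksRequest(&migrationhub.ListMigrationTasksInput{})
	p := migrationhub.NewListMigrationTasksPaginator(req)

	// Next issues additional requests as needed; CurrentPage exposes each response page.
	for p.Next(context.TODO()) {
		fmt.Println(p.CurrentPage())
	}
	if err := p.Err(); err != nil {
		log.Fatalf("listing migration tasks failed, %v", err)
	}
}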
+type ListMigrationTasksPaginator struct { + aws.Pager +} + +func (p *ListMigrationTasksPaginator) CurrentPage() *ListMigrationTasksOutput { + return p.Pager.CurrentPage().(*ListMigrationTasksOutput) +} + // ListMigrationTasksResponse is the response type for the // ListMigrationTasks API operation. type ListMigrationTasksResponse struct { diff --git a/service/migrationhub/api_op_ListProgressUpdateStreams.go b/service/migrationhub/api_op_ListProgressUpdateStreams.go index 02f6533afb2..7e0083ccf6a 100644 --- a/service/migrationhub/api_op_ListProgressUpdateStreams.go +++ b/service/migrationhub/api_op_ListProgressUpdateStreams.go @@ -77,6 +77,12 @@ func (c *Client) ListProgressUpdateStreamsRequest(input *ListProgressUpdateStrea Name: opListProgressUpdateStreams, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -111,6 +117,53 @@ func (r ListProgressUpdateStreamsRequest) Send(ctx context.Context) (*ListProgre return resp, nil } +// NewListProgressUpdateStreamsRequestPaginator returns a paginator for ListProgressUpdateStreams. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListProgressUpdateStreamsRequest(input) +// p := migrationhub.NewListProgressUpdateStreamsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListProgressUpdateStreamsPaginator(req ListProgressUpdateStreamsRequest) ListProgressUpdateStreamsPaginator { + return ListProgressUpdateStreamsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListProgressUpdateStreamsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListProgressUpdateStreamsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListProgressUpdateStreamsPaginator struct { + aws.Pager +} + +func (p *ListProgressUpdateStreamsPaginator) CurrentPage() *ListProgressUpdateStreamsOutput { + return p.Pager.CurrentPage().(*ListProgressUpdateStreamsOutput) +} + // ListProgressUpdateStreamsResponse is the response type for the // ListProgressUpdateStreams API operation. type ListProgressUpdateStreamsResponse struct { diff --git a/service/migrationhub/api_op_NotifyApplicationState.go b/service/migrationhub/api_op_NotifyApplicationState.go index 037ee89d689..5b085191293 100644 --- a/service/migrationhub/api_op_NotifyApplicationState.go +++ b/service/migrationhub/api_op_NotifyApplicationState.go @@ -4,6 +4,7 @@ package migrationhub import ( "context" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/internal/awsutil" @@ -12,7 +13,8 @@ import ( type NotifyApplicationStateInput struct { _ struct{} `type:"structure"` - // The configurationId in ADS that uniquely identifies the grouped application. + // The configurationId in Application Discovery Service that uniquely identifies + // the grouped application. 
// // ApplicationId is a required field ApplicationId *string `min:"1" type:"string" required:"true"` @@ -25,6 +27,9 @@ type NotifyApplicationStateInput struct { // // Status is a required field Status ApplicationStatus `type:"string" required:"true" enum:"true"` + + // The timestamp when the application state changed. + UpdateDateTime *time.Time `type:"timestamp"` } // String returns the string representation diff --git a/service/migrationhub/api_op_NotifyMigrationTaskState.go b/service/migrationhub/api_op_NotifyMigrationTaskState.go index 5dca32da7f5..d7a93f99b63 100644 --- a/service/migrationhub/api_op_NotifyMigrationTaskState.go +++ b/service/migrationhub/api_op_NotifyMigrationTaskState.go @@ -17,7 +17,8 @@ type NotifyMigrationTaskStateInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` diff --git a/service/migrationhub/api_op_PutResourceAttributes.go b/service/migrationhub/api_op_PutResourceAttributes.go index 95021d1417c..f4c96a434cd 100644 --- a/service/migrationhub/api_op_PutResourceAttributes.go +++ b/service/migrationhub/api_op_PutResourceAttributes.go @@ -17,7 +17,8 @@ type PutResourceAttributesInput struct { // to test if the caller has permission to make the call. DryRun *bool `type:"boolean"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. // // MigrationTaskName is a required field MigrationTaskName *string `min:"1" type:"string" required:"true"` @@ -28,8 +29,7 @@ type PutResourceAttributesInput struct { ProgressUpdateStream *string `min:"1" type:"string" required:"true"` // Information about the resource that is being migrated. This data will be - // used to map the task to a resource in the Application Discovery Service (ADS)'s - // repository. + // used to map the task to a resource in the Application Discovery Service repository. // // Takes the object array of ResourceAttribute where the Type field is reserved // for the following values: IPV4_ADDRESS | IPV6_ADDRESS | MAC_ADDRESS | FQDN @@ -41,7 +41,7 @@ type PutResourceAttributesInput struct { // is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID // is not set, then all "VM" fields will be discarded and "VM" fields will // not be used for matching the migration task to a server in Application - // Discovery Service (ADS)'s repository. See the Example (https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#API_PutResourceAttributes_Examples) + // Discovery Service repository. See the Example (https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#API_PutResourceAttributes_Examples) // section below for a use case of specifying "VM" related values. // // * If a server you are trying to match has multiple IP or MAC addresses, @@ -110,14 +110,14 @@ const opPutResourceAttributes = "PutResourceAttributes" // AWS Migration Hub. // // Provides identifying details of the resource being migrated so that it can -// be associated in the Application Discovery Service (ADS)'s repository. This -// association occurs asynchronously after PutResourceAttributes returns. 
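// A short sketch exercising the UpdateDateTime field newly added to
// NotifyApplicationStateInput above. The NotifyApplicationStateRequest method and
// the ApplicationStatusCompleted enum constant are assumed from the generated
// migrationhub client rather than shown in this change; identifiers are placeholders.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhub"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := migrationhub.New(cfg)

	now := time.Now()
	req := svc.NotifyApplicationStateRequest(&migrationhub.NotifyApplicationStateInput{
		// Placeholder configurationId from Application Discovery Service.
		ApplicationId:  aws.String("example-application-configuration-id"),
		Status:         migrationhub.ApplicationStatusCompleted, // assumed enum constant name
		UpdateDateTime: &now,                                    // field added in this change
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatalf("notifying application state failed, %v", err)
	}
}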
+// be associated in the Application Discovery Service repository. This association +// occurs asynchronously after PutResourceAttributes returns. // // * Keep in mind that subsequent calls to PutResourceAttributes will override // previously stored attributes. For example, if it is first called with // a MAC address, but later, it is desired to add an IP address, it will // then be required to call it with both the IP and MAC addresses to prevent -// overiding the MAC address. +// overriding the MAC address. // // * Note the instructions regarding the special use case of the ResourceAttributeList // (https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#migrationhub-PutResourceAttributes-request-ResourceAttributeList) diff --git a/service/migrationhub/api_types.go b/service/migrationhub/api_types.go index 01eb205ec1d..7852c64ffad 100644 --- a/service/migrationhub/api_types.go +++ b/service/migrationhub/api_types.go @@ -53,7 +53,8 @@ func (s *CreatedArtifact) Validate() error { type DiscoveredResource struct { _ struct{} `type:"structure"` - // The configurationId in ADS that uniquely identifies the on-premise resource. + // The configurationId in Application Discovery Service that uniquely identifies + // the on-premise resource. // // ConfigurationId is a required field ConfigurationId *string `min:"1" type:"string" required:"true"` @@ -89,12 +90,15 @@ func (s *DiscoveredResource) Validate() error { type MigrationTask struct { _ struct{} `type:"structure"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. MigrationTaskName *string `min:"1" type:"string"` // A name that identifies the vendor of the migration tool being used. ProgressUpdateStream *string `min:"1" type:"string"` + // Information about the resource that is being migrated. This data will be + // used to map the task to a resource in the Application Discovery Service repository. ResourceAttributeList []ResourceAttribute `type:"list"` // Task object encapsulating task information. @@ -114,9 +118,11 @@ func (s MigrationTask) String() string { type MigrationTaskSummary struct { _ struct{} `type:"structure"` - // Unique identifier that references the migration task. + // Unique identifier that references the migration task. Do not store personal + // data in this field. MigrationTaskName *string `min:"1" type:"string"` + // Indication of the percentage completion of the task. ProgressPercent *int64 `type:"integer"` // An AWS resource used for access control. It should uniquely identify the @@ -143,7 +149,8 @@ func (s MigrationTaskSummary) String() string { type ProgressUpdateStreamSummary struct { _ struct{} `type:"structure"` - // The name of the ProgressUpdateStream. + // The name of the ProgressUpdateStream. Do not store personal data in this + // field. ProgressUpdateStreamName *string `min:"1" type:"string"` } diff --git a/service/migrationhubconfig/api_client.go b/service/migrationhubconfig/api_client.go new file mode 100644 index 00000000000..9be0688cc44 --- /dev/null +++ b/service/migrationhubconfig/api_client.go @@ -0,0 +1,81 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/private/protocol/jsonrpc" +) + +// Client provides the API operation methods for making requests to +// AWS Migration Hub Config. 
See this package's package overview docs +// for details on the service. +// +// The client's methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type Client struct { + *aws.Client +} + +// Used for custom client initialization logic +var initClient func(*Client) + +// Used for custom request initialization logic +var initRequest func(*Client, *aws.Request) + +const ( + ServiceName = "AWS Migration Hub Config" // Service's name + ServiceID = "MigrationHubConfig" // Service's identifier + EndpointsID = "migrationhub-config" // Service's Endpoint identifier +) + +// New creates a new instance of the client from the provided Config. +// +// Example: +// // Create a client from just a config. +// svc := migrationhubconfig.New(myConfig) +func New(config aws.Config) *Client { + svc := &Client{ + Client: aws.NewClient( + config, + aws.Metadata{ + ServiceName: ServiceName, + ServiceID: ServiceID, + EndpointsID: EndpointsID, + SigningName: "mgh", + SigningRegion: config.Region, + APIVersion: "2019-06-30", + JSONVersion: "1.1", + TargetPrefix: "AWSMigrationHubMultiAccountService", + }, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc) + } + + return svc +} + +// newRequest creates a new request for a client operation and runs any +// custom request initialization. +func (c *Client) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(c, req) + } + + return req +} diff --git a/service/migrationhubconfig/api_doc.go b/service/migrationhubconfig/api_doc.go new file mode 100644 index 00000000000..b837bd88807 --- /dev/null +++ b/service/migrationhubconfig/api_doc.go @@ -0,0 +1,49 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package migrationhubconfig provides the client and types for making API +// requests to AWS Migration Hub Config. +// +// The AWS Migration Hub home region APIs are available specifically for working +// with your Migration Hub home region. You can use these APIs to determine +// a home region, as well as to create and work with controls that describe +// the home region. +// +// You can use these APIs within your home region only. If you call these APIs +// from outside your home region, your calls are rejected, except for the ability +// to register your agents and connectors. +// +// You must call GetHomeRegion at least once before you call any other AWS Application +// Discovery Service and AWS Migration Hub APIs, to obtain the account's Migration +// Hub home region. +// +// The StartDataCollection API call in AWS Application Discovery Service allows +// your agents and connectors to begin collecting data that flows directly into +// the home region, and it will prevent you from enabling data collection information +// to be sent outside the home region. +// +// For specific API usage, see the sections that follow in this AWS Migration +// Hub Home Region API reference. +// +// The Migration Hub Home Region APIs do not support AWS Organizations. 
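// A minimal sketch of the new AWS Migration Hub Config client, following the
// package documentation above: construct the client with New and call GetHomeRegion
// before using other Migration Hub APIs. The config loading and error handling are
// illustrative only.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := migrationhubconfig.New(cfg)

	// GetHomeRegion takes no parameters; the response reports the account's home region.
	resp, err := svc.GetHomeRegionRequest(&migrationhubconfig.GetHomeRegionInput{}).Send(context.TODO())
	if err != nil {
		log.Fatalf("getting home region failed, %v", err)
	}
	if resp.HomeRegion != nil {
		fmt.Println("home region:", *resp.HomeRegion)
	}
}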
+// +// See https://docs.aws.amazon.com/goto/WebAPI/migrationhub-config-2019-06-30 for more information on this service. +// +// See migrationhubconfig package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/migrationhubconfig/ +// +// Using the Client +// +// To use AWS Migration Hub Config with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Migration Hub Config client for more information on +// creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/migrationhubconfig/#New +package migrationhubconfig diff --git a/service/migrationhubconfig/api_enums.go b/service/migrationhubconfig/api_enums.go new file mode 100644 index 00000000000..645c1fd5a0d --- /dev/null +++ b/service/migrationhubconfig/api_enums.go @@ -0,0 +1,19 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +type TargetType string + +// Enum values for TargetType +const ( + TargetTypeAccount TargetType = "ACCOUNT" +) + +func (enum TargetType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TargetType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/migrationhubconfig/api_errors.go b/service/migrationhubconfig/api_errors.go new file mode 100644 index 00000000000..887fcd8a180 --- /dev/null +++ b/service/migrationhubconfig/api_errors.go @@ -0,0 +1,40 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeDryRunOperation for service response error code + // "DryRunOperation". + // + // Exception raised to indicate that authorization of an action was successful, + // when the DryRun flag is set to true. + ErrCodeDryRunOperation = "DryRunOperation" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // Exception raised when an internal, configuration, or dependency error is + // encountered. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInputException". + // + // Exception raised when the provided input violates a policy constraint or + // is entered in the wrong format or data type. + ErrCodeInvalidInputException = "InvalidInputException" + + // ErrCodeServiceUnavailableException for service response error code + // "ServiceUnavailableException". + // + // Exception raised when a request fails due to temporary unavailability of + // the service. 
+ ErrCodeServiceUnavailableException = "ServiceUnavailableException" +) diff --git a/service/migrationhubconfig/api_op_CreateHomeRegionControl.go b/service/migrationhubconfig/api_op_CreateHomeRegionControl.go new file mode 100644 index 00000000000..bd0f48bcf05 --- /dev/null +++ b/service/migrationhubconfig/api_op_CreateHomeRegionControl.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreateHomeRegionControlInput struct { + _ struct{} `type:"structure"` + + // Optional Boolean flag to indicate whether any effect should take place. It + // tests whether the caller has permission to make the call. + DryRun *bool `type:"boolean"` + + // The name of the home region of the calling account. + // + // HomeRegion is a required field + HomeRegion *string `min:"1" type:"string" required:"true"` + + // The account for which this command sets up a home region control. The Target + // is always of type ACCOUNT. + // + // Target is a required field + Target *Target `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateHomeRegionControlInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateHomeRegionControlInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateHomeRegionControlInput"} + + if s.HomeRegion == nil { + invalidParams.Add(aws.NewErrParamRequired("HomeRegion")) + } + if s.HomeRegion != nil && len(*s.HomeRegion) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("HomeRegion", 1)) + } + + if s.Target == nil { + invalidParams.Add(aws.NewErrParamRequired("Target")) + } + if s.Target != nil { + if err := s.Target.Validate(); err != nil { + invalidParams.AddNested("Target", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateHomeRegionControlOutput struct { + _ struct{} `type:"structure"` + + // This object is the HomeRegionControl object that's returned by a successful + // call to CreateHomeRegionControl. + HomeRegionControl *HomeRegionControl `type:"structure"` +} + +// String returns the string representation +func (s CreateHomeRegionControlOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateHomeRegionControl = "CreateHomeRegionControl" + +// CreateHomeRegionControlRequest returns a request value for making API operation for +// AWS Migration Hub Config. +// +// This API sets up the home region for the calling account only. +// +// // Example sending a request using CreateHomeRegionControlRequest. 
+// req := client.CreateHomeRegionControlRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/migrationhub-config-2019-06-30/CreateHomeRegionControl +func (c *Client) CreateHomeRegionControlRequest(input *CreateHomeRegionControlInput) CreateHomeRegionControlRequest { + op := &aws.Operation{ + Name: opCreateHomeRegionControl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateHomeRegionControlInput{} + } + + req := c.newRequest(op, input, &CreateHomeRegionControlOutput{}) + return CreateHomeRegionControlRequest{Request: req, Input: input, Copy: c.CreateHomeRegionControlRequest} +} + +// CreateHomeRegionControlRequest is the request type for the +// CreateHomeRegionControl API operation. +type CreateHomeRegionControlRequest struct { + *aws.Request + Input *CreateHomeRegionControlInput + Copy func(*CreateHomeRegionControlInput) CreateHomeRegionControlRequest +} + +// Send marshals and sends the CreateHomeRegionControl API request. +func (r CreateHomeRegionControlRequest) Send(ctx context.Context) (*CreateHomeRegionControlResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateHomeRegionControlResponse{ + CreateHomeRegionControlOutput: r.Request.Data.(*CreateHomeRegionControlOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateHomeRegionControlResponse is the response type for the +// CreateHomeRegionControl API operation. +type CreateHomeRegionControlResponse struct { + *CreateHomeRegionControlOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateHomeRegionControl request. +func (r *CreateHomeRegionControlResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/migrationhubconfig/api_op_DescribeHomeRegionControls.go b/service/migrationhubconfig/api_op_DescribeHomeRegionControls.go new file mode 100644 index 00000000000..6550c6fc24d --- /dev/null +++ b/service/migrationhubconfig/api_op_DescribeHomeRegionControls.go @@ -0,0 +1,201 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeHomeRegionControlsInput struct { + _ struct{} `type:"structure"` + + // The ControlID is a unique identifier string of your HomeRegionControl object. + ControlId *string `min:"1" type:"string"` + + // The name of the home region you'd like to view. + HomeRegion *string `min:"1" type:"string"` + + // The maximum number of filtering results to display per page. + MaxResults *int64 `min:"1" type:"integer"` + + // If a NextToken was returned by a previous call, more results are available. + // To retrieve the next page of results, make the call again using the returned + // token in NextToken. + NextToken *string `type:"string"` + + // The target parameter specifies the identifier to which the home region is + // applied, which is always of type ACCOUNT. It applies the home region to the + // current ACCOUNT. + Target *Target `type:"structure"` +} + +// String returns the string representation +func (s DescribeHomeRegionControlsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
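// A hedged sketch of CreateHomeRegionControl using the request type defined above.
// The Target type and the TargetTypeAccount constant come from this package's
// api_types.go and api_enums.go; the region and account ID are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := migrationhubconfig.New(cfg)

	req := svc.CreateHomeRegionControlRequest(&migrationhubconfig.CreateHomeRegionControlInput{
		HomeRegion: aws.String("us-west-2"), // example home region
		Target: &migrationhubconfig.Target{
			Type: migrationhubconfig.TargetTypeAccount,
			Id:   aws.String("123456789012"), // placeholder 12-character account ID
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatalf("creating home region control failed, %v", err)
	}
	if resp.HomeRegionControl != nil && resp.HomeRegionControl.ControlId != nil {
		fmt.Println("created control:", *resp.HomeRegionControl.ControlId)
	}
}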
+func (s *DescribeHomeRegionControlsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeHomeRegionControlsInput"} + if s.ControlId != nil && len(*s.ControlId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ControlId", 1)) + } + if s.HomeRegion != nil && len(*s.HomeRegion) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("HomeRegion", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + if s.Target != nil { + if err := s.Target.Validate(); err != nil { + invalidParams.AddNested("Target", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeHomeRegionControlsOutput struct { + _ struct{} `type:"structure"` + + // An array that contains your HomeRegionControl objects. + HomeRegionControls []HomeRegionControl `type:"list"` + + // If a NextToken was returned by a previous call, more results are available. + // To retrieve the next page of results, make the call again using the returned + // token in NextToken. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeHomeRegionControlsOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeHomeRegionControls = "DescribeHomeRegionControls" + +// DescribeHomeRegionControlsRequest returns a request value for making API operation for +// AWS Migration Hub Config. +// +// This API permits filtering on the ControlId, HomeRegion, and RegionControlScope +// fields. +// +// // Example sending a request using DescribeHomeRegionControlsRequest. +// req := client.DescribeHomeRegionControlsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/migrationhub-config-2019-06-30/DescribeHomeRegionControls +func (c *Client) DescribeHomeRegionControlsRequest(input *DescribeHomeRegionControlsInput) DescribeHomeRegionControlsRequest { + op := &aws.Operation{ + Name: opDescribeHomeRegionControls, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeHomeRegionControlsInput{} + } + + req := c.newRequest(op, input, &DescribeHomeRegionControlsOutput{}) + return DescribeHomeRegionControlsRequest{Request: req, Input: input, Copy: c.DescribeHomeRegionControlsRequest} +} + +// DescribeHomeRegionControlsRequest is the request type for the +// DescribeHomeRegionControls API operation. +type DescribeHomeRegionControlsRequest struct { + *aws.Request + Input *DescribeHomeRegionControlsInput + Copy func(*DescribeHomeRegionControlsInput) DescribeHomeRegionControlsRequest +} + +// Send marshals and sends the DescribeHomeRegionControls API request. +func (r DescribeHomeRegionControlsRequest) Send(ctx context.Context) (*DescribeHomeRegionControlsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeHomeRegionControlsResponse{ + DescribeHomeRegionControlsOutput: r.Request.Data.(*DescribeHomeRegionControlsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewDescribeHomeRegionControlsRequestPaginator returns a paginator for DescribeHomeRegionControls. 
+// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.DescribeHomeRegionControlsRequest(input) +// p := migrationhubconfig.NewDescribeHomeRegionControlsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewDescribeHomeRegionControlsPaginator(req DescribeHomeRegionControlsRequest) DescribeHomeRegionControlsPaginator { + return DescribeHomeRegionControlsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *DescribeHomeRegionControlsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// DescribeHomeRegionControlsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type DescribeHomeRegionControlsPaginator struct { + aws.Pager +} + +func (p *DescribeHomeRegionControlsPaginator) CurrentPage() *DescribeHomeRegionControlsOutput { + return p.Pager.CurrentPage().(*DescribeHomeRegionControlsOutput) +} + +// DescribeHomeRegionControlsResponse is the response type for the +// DescribeHomeRegionControls API operation. +type DescribeHomeRegionControlsResponse struct { + *DescribeHomeRegionControlsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeHomeRegionControls request. +func (r *DescribeHomeRegionControlsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/migrationhubconfig/api_op_GetHomeRegion.go b/service/migrationhubconfig/api_op_GetHomeRegion.go new file mode 100644 index 00000000000..0dc7377a04f --- /dev/null +++ b/service/migrationhubconfig/api_op_GetHomeRegion.go @@ -0,0 +1,103 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type GetHomeRegionInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetHomeRegionInput) String() string { + return awsutil.Prettify(s) +} + +type GetHomeRegionOutput struct { + _ struct{} `type:"structure"` + + // The name of the home region of the calling account. + HomeRegion *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetHomeRegionOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetHomeRegion = "GetHomeRegion" + +// GetHomeRegionRequest returns a request value for making API operation for +// AWS Migration Hub Config. +// +// Returns the calling account’s home region, if configured. This API is used +// by other AWS services to determine the regional endpoint for calling AWS +// Application Discovery Service and Migration Hub. You must call GetHomeRegion +// at least once before you call any other AWS Application Discovery Service +// and AWS Migration Hub APIs, to obtain the account's Migration Hub home region. +// +// // Example sending a request using GetHomeRegionRequest. 
+// req := client.GetHomeRegionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/migrationhub-config-2019-06-30/GetHomeRegion +func (c *Client) GetHomeRegionRequest(input *GetHomeRegionInput) GetHomeRegionRequest { + op := &aws.Operation{ + Name: opGetHomeRegion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetHomeRegionInput{} + } + + req := c.newRequest(op, input, &GetHomeRegionOutput{}) + return GetHomeRegionRequest{Request: req, Input: input, Copy: c.GetHomeRegionRequest} +} + +// GetHomeRegionRequest is the request type for the +// GetHomeRegion API operation. +type GetHomeRegionRequest struct { + *aws.Request + Input *GetHomeRegionInput + Copy func(*GetHomeRegionInput) GetHomeRegionRequest +} + +// Send marshals and sends the GetHomeRegion API request. +func (r GetHomeRegionRequest) Send(ctx context.Context) (*GetHomeRegionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetHomeRegionResponse{ + GetHomeRegionOutput: r.Request.Data.(*GetHomeRegionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetHomeRegionResponse is the response type for the +// GetHomeRegion API operation. +type GetHomeRegionResponse struct { + *GetHomeRegionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetHomeRegion request. +func (r *GetHomeRegionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/migrationhubconfig/api_types.go b/service/migrationhubconfig/api_types.go new file mode 100644 index 00000000000..088670a7eb7 --- /dev/null +++ b/service/migrationhubconfig/api_types.go @@ -0,0 +1,79 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package migrationhubconfig + +import ( + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +var _ aws.Config +var _ = awsutil.Prettify + +// A home region control is an object that specifies the home region for an +// account, with some additional information. It contains a target (always of +// type ACCOUNT), an ID, and a time at which the home region was set. +type HomeRegionControl struct { + _ struct{} `type:"structure"` + + // A unique identifier that's generated for each home region control. It's always + // a string that begins with "hrc-" followed by 12 lowercase letters and numbers. + ControlId *string `min:"1" type:"string"` + + // The AWS Region that's been set as home region. For example, "us-west-2" or + // "eu-central-1" are valid home regions. + HomeRegion *string `min:"1" type:"string"` + + // A timestamp representing the time when the customer called CreateHomeregionControl + // and set the home region for the account. + RequestedTime *time.Time `type:"timestamp"` + + // The target parameter specifies the identifier to which the home region is + // applied, which is always an ACCOUNT. It applies the home region to the current + // ACCOUNT. + Target *Target `type:"structure"` +} + +// String returns the string representation +func (s HomeRegionControl) String() string { + return awsutil.Prettify(s) +} + +// The target parameter specifies the identifier to which the home region is +// applied, which is always an ACCOUNT. It applies the home region to the current +// ACCOUNT. 
+type Target struct { + _ struct{} `type:"structure"` + + // The TargetID is a 12-character identifier of the ACCOUNT for which the control + // was created. (This must be the current account.) + Id *string `min:"12" type:"string"` + + // The target type is always an ACCOUNT. + // + // Type is a required field + Type TargetType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s Target) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Target) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Target"} + if s.Id != nil && len(*s.Id) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("Id", 12)) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} diff --git a/service/migrationhubconfig/migrationhubconfigiface/interface.go b/service/migrationhubconfig/migrationhubconfigiface/interface.go new file mode 100644 index 00000000000..9227c5b5216 --- /dev/null +++ b/service/migrationhubconfig/migrationhubconfigiface/interface.go @@ -0,0 +1,71 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package migrationhubconfigiface provides an interface to enable mocking the AWS Migration Hub Config service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package migrationhubconfigiface + +import ( + "github.com/aws/aws-sdk-go-v2/service/migrationhubconfig" +) + +// ClientAPI provides an interface to enable mocking the +// migrationhubconfig.Client methods. This make unit testing your code that +// calls out to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Migration Hub Config. +// func myFunc(svc migrationhubconfigiface.ClientAPI) bool { +// // Make svc.CreateHomeRegionControl request +// } +// +// func main() { +// cfg, err := external.LoadDefaultAWSConfig() +// if err != nil { +// panic("failed to load config, " + err.Error()) +// } +// +// svc := migrationhubconfig.New(cfg) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockClientClient struct { +// migrationhubconfigiface.ClientPI +// } +// func (m *mockClientClient) CreateHomeRegionControl(input *migrationhubconfig.CreateHomeRegionControlInput) (*migrationhubconfig.CreateHomeRegionControlOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockClientClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. 
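// A compact sketch of the mocking pattern described above: production code accepts
// the migrationhubconfigiface.ClientAPI interface, and tests satisfy it with a stub
// that embeds the interface and overrides only the methods they exercise. The
// function and stub names here are illustrative, not part of the SDK.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig"
	"github.com/aws/aws-sdk-go-v2/service/migrationhubconfig/migrationhubconfigiface"
)

// fetchHomeRegion depends only on the interface, so it can be unit tested without a
// real AWS Migration Hub Config client.
func fetchHomeRegion(ctx context.Context, svc migrationhubconfigiface.ClientAPI) (string, error) {
	resp, err := svc.GetHomeRegionRequest(&migrationhubconfig.GetHomeRegionInput{}).Send(ctx)
	if err != nil {
		return "", err
	}
	if resp.HomeRegion == nil {
		return "", nil
	}
	return *resp.HomeRegion, nil
}

// stubClient embeds ClientAPI so it satisfies the full interface; a test would
// override GetHomeRegionRequest to return canned data before calling fetchHomeRegion.
type stubClient struct {
	migrationhubconfigiface.ClientAPI
}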
+type ClientAPI interface { + CreateHomeRegionControlRequest(*migrationhubconfig.CreateHomeRegionControlInput) migrationhubconfig.CreateHomeRegionControlRequest + + DescribeHomeRegionControlsRequest(*migrationhubconfig.DescribeHomeRegionControlsInput) migrationhubconfig.DescribeHomeRegionControlsRequest + + GetHomeRegionRequest(*migrationhubconfig.GetHomeRegionInput) migrationhubconfig.GetHomeRegionRequest +} + +var _ ClientAPI = (*migrationhubconfig.Client)(nil) diff --git a/service/personalize/api_op_CreateBatchInferenceJob.go b/service/personalize/api_op_CreateBatchInferenceJob.go new file mode 100644 index 00000000000..6ee31c28a80 --- /dev/null +++ b/service/personalize/api_op_CreateBatchInferenceJob.go @@ -0,0 +1,175 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package personalize + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type CreateBatchInferenceJobInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 path that leads to the input file to base your recommendations + // on. The input material must be in JSON format. + // + // JobInput is a required field + JobInput *BatchInferenceJobInput `locationName:"jobInput" type:"structure" required:"true"` + + // The name of the batch inference job to create. + // + // JobName is a required field + JobName *string `locationName:"jobName" min:"1" type:"string" required:"true"` + + // The path to the Amazon S3 bucket where the job's output will be stored. + // + // JobOutput is a required field + JobOutput *BatchInferenceJobOutput `locationName:"jobOutput" type:"structure" required:"true"` + + // The number of recommendations to retreive. + NumResults *int64 `locationName:"numResults" type:"integer"` + + // The ARN of the Amazon Identity and Access Management role that has permissions + // to read and write to your input and out Amazon S3 buckets respectively. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the solution version that will be used + // to generate the batch inference recommendations. + // + // SolutionVersionArn is a required field + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBatchInferenceJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateBatchInferenceJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateBatchInferenceJobInput"} + + if s.JobInput == nil { + invalidParams.Add(aws.NewErrParamRequired("JobInput")) + } + + if s.JobName == nil { + invalidParams.Add(aws.NewErrParamRequired("JobName")) + } + if s.JobName != nil && len(*s.JobName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("JobName", 1)) + } + + if s.JobOutput == nil { + invalidParams.Add(aws.NewErrParamRequired("JobOutput")) + } + + if s.RoleArn == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleArn")) + } + + if s.SolutionVersionArn == nil { + invalidParams.Add(aws.NewErrParamRequired("SolutionVersionArn")) + } + if s.JobInput != nil { + if err := s.JobInput.Validate(); err != nil { + invalidParams.AddNested("JobInput", err.(aws.ErrInvalidParams)) + } + } + if s.JobOutput != nil { + if err := s.JobOutput.Validate(); err != nil { + invalidParams.AddNested("JobOutput", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateBatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the batch inference job. + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` +} + +// String returns the string representation +func (s CreateBatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateBatchInferenceJob = "CreateBatchInferenceJob" + +// CreateBatchInferenceJobRequest returns a request value for making API operation for +// Amazon Personalize. +// +// Creates a batch inference job. The operation can handle up to 50 million +// records and the input file must be in JSON format. For more information, +// see recommendations-batch. +// +// // Example sending a request using CreateBatchInferenceJobRequest. +// req := client.CreateBatchInferenceJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateBatchInferenceJob +func (c *Client) CreateBatchInferenceJobRequest(input *CreateBatchInferenceJobInput) CreateBatchInferenceJobRequest { + op := &aws.Operation{ + Name: opCreateBatchInferenceJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBatchInferenceJobInput{} + } + + req := c.newRequest(op, input, &CreateBatchInferenceJobOutput{}) + return CreateBatchInferenceJobRequest{Request: req, Input: input, Copy: c.CreateBatchInferenceJobRequest} +} + +// CreateBatchInferenceJobRequest is the request type for the +// CreateBatchInferenceJob API operation. +type CreateBatchInferenceJobRequest struct { + *aws.Request + Input *CreateBatchInferenceJobInput + Copy func(*CreateBatchInferenceJobInput) CreateBatchInferenceJobRequest +} + +// Send marshals and sends the CreateBatchInferenceJob API request. +func (r CreateBatchInferenceJobRequest) Send(ctx context.Context) (*CreateBatchInferenceJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateBatchInferenceJobResponse{ + CreateBatchInferenceJobOutput: r.Request.Data.(*CreateBatchInferenceJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateBatchInferenceJobResponse is the response type for the +// CreateBatchInferenceJob API operation. 
+type CreateBatchInferenceJobResponse struct { + *CreateBatchInferenceJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateBatchInferenceJob request. +func (r *CreateBatchInferenceJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/personalize/api_op_DescribeBatchInferenceJob.go b/service/personalize/api_op_DescribeBatchInferenceJob.go new file mode 100644 index 00000000000..65ccca3cee8 --- /dev/null +++ b/service/personalize/api_op_DescribeBatchInferenceJob.go @@ -0,0 +1,120 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package personalize + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeBatchInferenceJobInput struct { + _ struct{} `type:"structure"` + + // The ARN of the batch inference job to describe. + // + // BatchInferenceJobArn is a required field + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBatchInferenceJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBatchInferenceJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeBatchInferenceJobInput"} + + if s.BatchInferenceJobArn == nil { + invalidParams.Add(aws.NewErrParamRequired("BatchInferenceJobArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeBatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // Information on the specified batch inference job. + BatchInferenceJob *BatchInferenceJob `locationName:"batchInferenceJob" type:"structure"` +} + +// String returns the string representation +func (s DescribeBatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeBatchInferenceJob = "DescribeBatchInferenceJob" + +// DescribeBatchInferenceJobRequest returns a request value for making API operation for +// Amazon Personalize. +// +// Gets the properties of a batch inference job including name, Amazon Resource +// Name (ARN), status, input and output configurations, and the ARN of the solution +// version used to generate the recommendations. +// +// // Example sending a request using DescribeBatchInferenceJobRequest. +// req := client.DescribeBatchInferenceJobRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeBatchInferenceJob +func (c *Client) DescribeBatchInferenceJobRequest(input *DescribeBatchInferenceJobInput) DescribeBatchInferenceJobRequest { + op := &aws.Operation{ + Name: opDescribeBatchInferenceJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBatchInferenceJobInput{} + } + + req := c.newRequest(op, input, &DescribeBatchInferenceJobOutput{}) + return DescribeBatchInferenceJobRequest{Request: req, Input: input, Copy: c.DescribeBatchInferenceJobRequest} +} + +// DescribeBatchInferenceJobRequest is the request type for the +// DescribeBatchInferenceJob API operation. 
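// A minimal sketch for the new DescribeBatchInferenceJob operation defined above,
// assuming the standard personalize.New constructor; the job ARN is a placeholder.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/personalize"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := personalize.New(cfg)

	req := svc.DescribeBatchInferenceJobRequest(&personalize.DescribeBatchInferenceJobInput{
		// Placeholder ARN of an existing batch inference job.
		BatchInferenceJobArn: aws.String("arn:aws:personalize:us-west-2:123456789012:batch-inference-job/example"),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatalf("describing batch inference job failed, %v", err)
	}
	fmt.Println(resp.BatchInferenceJob)
}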
+type DescribeBatchInferenceJobRequest struct { + *aws.Request + Input *DescribeBatchInferenceJobInput + Copy func(*DescribeBatchInferenceJobInput) DescribeBatchInferenceJobRequest +} + +// Send marshals and sends the DescribeBatchInferenceJob API request. +func (r DescribeBatchInferenceJobRequest) Send(ctx context.Context) (*DescribeBatchInferenceJobResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeBatchInferenceJobResponse{ + DescribeBatchInferenceJobOutput: r.Request.Data.(*DescribeBatchInferenceJobOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeBatchInferenceJobResponse is the response type for the +// DescribeBatchInferenceJob API operation. +type DescribeBatchInferenceJobResponse struct { + *DescribeBatchInferenceJobOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeBatchInferenceJob request. +func (r *DescribeBatchInferenceJobResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/personalize/api_op_ListBatchInferenceJobs.go b/service/personalize/api_op_ListBatchInferenceJobs.go new file mode 100644 index 00000000000..c34f952e453 --- /dev/null +++ b/service/personalize/api_op_ListBatchInferenceJobs.go @@ -0,0 +1,181 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package personalize + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ListBatchInferenceJobsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of batch inference job results to return in each page. + // The default value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token to request the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The Amazon Resource Name (ARN) of the solution version from which the batch + // inference jobs were created. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` +} + +// String returns the string representation +func (s ListBatchInferenceJobsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBatchInferenceJobsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListBatchInferenceJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListBatchInferenceJobsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information on each job that is returned. + BatchInferenceJobs []BatchInferenceJobSummary `locationName:"batchInferenceJobs" type:"list"` + + // The token to use to retreive the next page of results. The value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListBatchInferenceJobsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListBatchInferenceJobs = "ListBatchInferenceJobs" + +// ListBatchInferenceJobsRequest returns a request value for making API operation for +// Amazon Personalize. +// +// Gets a list of the batch inference jobs that have been performed off of a +// solution version. 
+// +// // Example sending a request using ListBatchInferenceJobsRequest. +// req := client.ListBatchInferenceJobsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListBatchInferenceJobs +func (c *Client) ListBatchInferenceJobsRequest(input *ListBatchInferenceJobsInput) ListBatchInferenceJobsRequest { + op := &aws.Operation{ + Name: opListBatchInferenceJobs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBatchInferenceJobsInput{} + } + + req := c.newRequest(op, input, &ListBatchInferenceJobsOutput{}) + return ListBatchInferenceJobsRequest{Request: req, Input: input, Copy: c.ListBatchInferenceJobsRequest} +} + +// ListBatchInferenceJobsRequest is the request type for the +// ListBatchInferenceJobs API operation. +type ListBatchInferenceJobsRequest struct { + *aws.Request + Input *ListBatchInferenceJobsInput + Copy func(*ListBatchInferenceJobsInput) ListBatchInferenceJobsRequest +} + +// Send marshals and sends the ListBatchInferenceJobs API request. +func (r ListBatchInferenceJobsRequest) Send(ctx context.Context) (*ListBatchInferenceJobsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListBatchInferenceJobsResponse{ + ListBatchInferenceJobsOutput: r.Request.Data.(*ListBatchInferenceJobsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListBatchInferenceJobsRequestPaginator returns a paginator for ListBatchInferenceJobs. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListBatchInferenceJobsRequest(input) +// p := personalize.NewListBatchInferenceJobsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListBatchInferenceJobsPaginator(req ListBatchInferenceJobsRequest) ListBatchInferenceJobsPaginator { + return ListBatchInferenceJobsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListBatchInferenceJobsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListBatchInferenceJobsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListBatchInferenceJobsPaginator struct { + aws.Pager +} + +func (p *ListBatchInferenceJobsPaginator) CurrentPage() *ListBatchInferenceJobsOutput { + return p.Pager.CurrentPage().(*ListBatchInferenceJobsOutput) +} + +// ListBatchInferenceJobsResponse is the response type for the +// ListBatchInferenceJobs API operation. +type ListBatchInferenceJobsResponse struct { + *ListBatchInferenceJobsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListBatchInferenceJobs request. 
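// Editor's note: an illustrative use of the ListBatchInferenceJobs paginator
// defined above; not part of the generated diff. The solution version ARN and
// page size are placeholders, and imports match the earlier sketches.
func listBatchJobs(ctx context.Context, svc *personalize.Client, solutionVersionArn string) error {
	req := svc.ListBatchInferenceJobsRequest(&personalize.ListBatchInferenceJobsInput{
		SolutionVersionArn: aws.String(solutionVersionArn),
		MaxResults:         aws.Int64(25),
	})
	// The paginator follows the nextToken/maxResults tokens declared in the
	// operation's aws.Paginator definition above.
	p := personalize.NewListBatchInferenceJobsPaginator(req)
	for p.Next(ctx) {
		for _, summary := range p.CurrentPage().BatchInferenceJobs {
			fmt.Println(summary)
		}
	}
	return p.Err()
}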
+func (r *ListBatchInferenceJobsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/personalize/api_types.go b/service/personalize/api_types.go index 74ca21e0bb6..e69ca8a79c3 100644 --- a/service/personalize/api_types.go +++ b/service/personalize/api_types.go @@ -106,6 +106,171 @@ func (s AutoMLResult) String() string { return awsutil.Prettify(s) } +// Contains information on a batch inference job. +type BatchInferenceJob struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the batch inference job. + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` + + // The time at which the batch inference job was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If the batch inference job failed, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The Amazon S3 path that leads to the input data used to generate the batch + // inference job. + JobInput *BatchInferenceJobInput `locationName:"jobInput" type:"structure"` + + // The name of the batch inference job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The Amazon S3 bucket that contains the output data generated by the batch + // inference job. + JobOutput *BatchInferenceJobOutput `locationName:"jobOutput" type:"structure"` + + // The time at which the batch inference job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The number of recommendations generated by the batch inference job. This + // number includes the error messages generated for failed input records. + NumResults *int64 `locationName:"numResults" type:"integer"` + + // The ARN of the Amazon Identity and Access Management (IAM) role that requested + // the batch inference job. + RoleArn *string `locationName:"roleArn" type:"string"` + + // The Amazon Resource Name (ARN) of the solution version from which the batch + // inference job was created. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + + // The status of the batch inference job. The status is one of the following + // values: + // + // * PENDING + // + // * IN PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s BatchInferenceJob) String() string { + return awsutil.Prettify(s) +} + +// The input configuration of a batch inference job. +type BatchInferenceJobInput struct { + _ struct{} `type:"structure"` + + // The URI of the Amazon S3 location that contains your input data. The Amazon + // S3 bucket must be in the same region as the API endpoint you are calling. + // + // S3DataSource is a required field + S3DataSource *S3DataConfig `locationName:"s3DataSource" type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchInferenceJobInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchInferenceJobInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchInferenceJobInput"} + + if s.S3DataSource == nil { + invalidParams.Add(aws.NewErrParamRequired("S3DataSource")) + } + if s.S3DataSource != nil { + if err := s.S3DataSource.Validate(); err != nil { + invalidParams.AddNested("S3DataSource", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The output configuration parameters of a batch inference job. +type BatchInferenceJobOutput struct { + _ struct{} `type:"structure"` + + // Information on the Amazon S3 bucket in which the batch inference job's output + // is stored. + // + // S3DataDestination is a required field + S3DataDestination *S3DataConfig `locationName:"s3DataDestination" type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchInferenceJobOutput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchInferenceJobOutput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "BatchInferenceJobOutput"} + + if s.S3DataDestination == nil { + invalidParams.Add(aws.NewErrParamRequired("S3DataDestination")) + } + if s.S3DataDestination != nil { + if err := s.S3DataDestination.Validate(); err != nil { + invalidParams.AddNested("S3DataDestination", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A truncated version of the BatchInferenceJob datatype. The ListBatchInferenceJobs +// operation returns a list of batch inference job summaries. +type BatchInferenceJobSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the batch inference job. + BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` + + // The time at which the batch inference job was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // If the batch inference job failed, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The name of the batch inference job. + JobName *string `locationName:"jobName" min:"1" type:"string"` + + // The time at which the batch inference job was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The status of the batch inference job. The status is one of the following + // values: + // + // * PENDING + // + // * IN PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s BatchInferenceJobSummary) String() string { + return awsutil.Prettify(s) +} + // Describes a deployed solution version, otherwise known as a campaign. For // more information on campaigns, see CreateCampaign. type Campaign struct { @@ -1009,6 +1174,40 @@ func (s RecipeSummary) String() string { return awsutil.Prettify(s) } +// The configuration details of an Amazon S3 input or output bucket. +type S3DataConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) + // key that Amazon Personalize uses to encrypt or decrypt the input and output + // files of a batch inference job. + KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` + + // The file path of the Amazon S3 bucket. 
+ // + // Path is a required field + Path *string `locationName:"path" type:"string" required:"true"` +} + +// String returns the string representation +func (s S3DataConfig) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3DataConfig) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "S3DataConfig"} + + if s.Path == nil { + invalidParams.Add(aws.NewErrParamRequired("Path")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // An object that provides information about a solution. A solution is a trained // model that can be deployed as a campaign. type Solution struct { @@ -1090,9 +1289,7 @@ type SolutionConfig struct { // Lists the feature transformation parameters. FeatureTransformationParameters map[string]string `locationName:"featureTransformationParameters" type:"map"` - // Describes the properties for hyperparameter optimization (HPO). For use with - // the bring-your-own-recipe feature. Not used with Amazon Personalize predefined - // recipes. + // Describes the properties for hyperparameter optimization (HPO). HpoConfig *HPOConfig `locationName:"hpoConfig" type:"structure"` } diff --git a/service/personalize/personalizeiface/interface.go b/service/personalize/personalizeiface/interface.go index 085071d8eb5..832bfa7b5b3 100644 --- a/service/personalize/personalizeiface/interface.go +++ b/service/personalize/personalizeiface/interface.go @@ -23,7 +23,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon Personalize. // func myFunc(svc personalizeiface.ClientAPI) bool { -// // Make svc.CreateCampaign request +// // Make svc.CreateBatchInferenceJob request // } // // func main() { @@ -43,7 +43,7 @@ import ( // type mockClientClient struct { // personalizeiface.ClientPI // } -// func (m *mockClientClient) CreateCampaign(input *personalize.CreateCampaignInput) (*personalize.CreateCampaignOutput, error) { +// func (m *mockClientClient) CreateBatchInferenceJob(input *personalize.CreateBatchInferenceJobInput) (*personalize.CreateBatchInferenceJobOutput, error) { // // mock response/functionality // } // @@ -61,6 +61,8 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
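// Editor's note: a minimal sketch of the mocking pattern described in the
// interface comment above; not part of the generated diff. It assumes a test file
// that imports both the personalize and personalizeiface packages. Wiring a fake
// *aws.Request so Send can be stubbed is beyond this sketch; the test double
// below only records the input it was given.
type stubPersonalizeClient struct {
	personalizeiface.ClientAPI // embedded so the remaining methods satisfy the interface
	gotInput *personalize.CreateBatchInferenceJobInput
}

func (s *stubPersonalizeClient) CreateBatchInferenceJobRequest(in *personalize.CreateBatchInferenceJobInput) personalize.CreateBatchInferenceJobRequest {
	s.gotInput = in
	return personalize.CreateBatchInferenceJobRequest{Input: in}
}

// startBatchJob depends on the interface rather than *personalize.Client, so it
// accepts either the real client or the stub above.
func startBatchJob(svc personalizeiface.ClientAPI, in *personalize.CreateBatchInferenceJobInput) personalize.CreateBatchInferenceJobRequest {
	return svc.CreateBatchInferenceJobRequest(in)
}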
type ClientAPI interface { + CreateBatchInferenceJobRequest(*personalize.CreateBatchInferenceJobInput) personalize.CreateBatchInferenceJobRequest + CreateCampaignRequest(*personalize.CreateCampaignInput) personalize.CreateCampaignRequest CreateDatasetRequest(*personalize.CreateDatasetInput) personalize.CreateDatasetRequest @@ -91,6 +93,8 @@ type ClientAPI interface { DescribeAlgorithmRequest(*personalize.DescribeAlgorithmInput) personalize.DescribeAlgorithmRequest + DescribeBatchInferenceJobRequest(*personalize.DescribeBatchInferenceJobInput) personalize.DescribeBatchInferenceJobRequest + DescribeCampaignRequest(*personalize.DescribeCampaignInput) personalize.DescribeCampaignRequest DescribeDatasetRequest(*personalize.DescribeDatasetInput) personalize.DescribeDatasetRequest @@ -113,6 +117,8 @@ type ClientAPI interface { GetSolutionMetricsRequest(*personalize.GetSolutionMetricsInput) personalize.GetSolutionMetricsRequest + ListBatchInferenceJobsRequest(*personalize.ListBatchInferenceJobsInput) personalize.ListBatchInferenceJobsRequest + ListCampaignsRequest(*personalize.ListCampaignsInput) personalize.ListCampaignsRequest ListDatasetGroupsRequest(*personalize.ListDatasetGroupsInput) personalize.ListDatasetGroupsRequest diff --git a/service/pinpoint/api_op_CreateVoiceTemplate.go b/service/pinpoint/api_op_CreateVoiceTemplate.go new file mode 100644 index 00000000000..04235313c41 --- /dev/null +++ b/service/pinpoint/api_op_CreateVoiceTemplate.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package pinpoint + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateVoiceTemplateInput struct { + _ struct{} `type:"structure" payload:"VoiceTemplateRequest"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` + + // Specifies the content and settings for a message template that can be used + // in messages that are sent through the voice channel. + // + // VoiceTemplateRequest is a required field + VoiceTemplateRequest *VoiceTemplateRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVoiceTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVoiceTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateVoiceTemplateInput"} + + if s.TemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateName")) + } + + if s.VoiceTemplateRequest == nil { + invalidParams.Add(aws.NewErrParamRequired("VoiceTemplateRequest")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateVoiceTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.TemplateName != nil { + v := *s.TemplateName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "template-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VoiceTemplateRequest != nil { + v := s.VoiceTemplateRequest + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "VoiceTemplateRequest", v, metadata) + } + return nil +} + +type CreateVoiceTemplateOutput struct { + _ struct{} `type:"structure" payload:"CreateTemplateMessageBody"` + + // Provides information about a request to create a message template. + // + // CreateTemplateMessageBody is a required field + CreateTemplateMessageBody *CreateTemplateMessageBody `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVoiceTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateVoiceTemplateOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.CreateTemplateMessageBody != nil { + v := s.CreateTemplateMessageBody + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "CreateTemplateMessageBody", v, metadata) + } + return nil +} + +const opCreateVoiceTemplate = "CreateVoiceTemplate" + +// CreateVoiceTemplateRequest returns a request value for making API operation for +// Amazon Pinpoint. +// +// Creates a message template that you can use in messages that are sent through +// the voice channel. +// +// // Example sending a request using CreateVoiceTemplateRequest. +// req := client.CreateVoiceTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/CreateVoiceTemplate +func (c *Client) CreateVoiceTemplateRequest(input *CreateVoiceTemplateInput) CreateVoiceTemplateRequest { + op := &aws.Operation{ + Name: opCreateVoiceTemplate, + HTTPMethod: "POST", + HTTPPath: "/v1/templates/{template-name}/voice", + } + + if input == nil { + input = &CreateVoiceTemplateInput{} + } + + req := c.newRequest(op, input, &CreateVoiceTemplateOutput{}) + return CreateVoiceTemplateRequest{Request: req, Input: input, Copy: c.CreateVoiceTemplateRequest} +} + +// CreateVoiceTemplateRequest is the request type for the +// CreateVoiceTemplate API operation. +type CreateVoiceTemplateRequest struct { + *aws.Request + Input *CreateVoiceTemplateInput + Copy func(*CreateVoiceTemplateInput) CreateVoiceTemplateRequest +} + +// Send marshals and sends the CreateVoiceTemplate API request. +func (r CreateVoiceTemplateRequest) Send(ctx context.Context) (*CreateVoiceTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateVoiceTemplateResponse{ + CreateVoiceTemplateOutput: r.Request.Data.(*CreateVoiceTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateVoiceTemplateResponse is the response type for the +// CreateVoiceTemplate API operation. +type CreateVoiceTemplateResponse struct { + *CreateVoiceTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateVoiceTemplate request. 
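// Editor's note: an illustrative sketch of creating a voice template with the
// request/Send pattern above; not part of the generated diff. It assumes the
// github.com/aws/aws-sdk-go-v2/service/pinpoint import alongside the aws package.
// The template name is a placeholder, and the Body, LanguageCode, and VoiceId
// fields on VoiceTemplateRequest are assumptions (that type's definition is not
// shown in this hunk). UpdateVoiceTemplateRequest later in this diff takes the
// same TemplateName/VoiceTemplateRequest pair.
func createVoiceTemplate(ctx context.Context, svc *pinpoint.Client) error {
	req := svc.CreateVoiceTemplateRequest(&pinpoint.CreateVoiceTemplateInput{
		TemplateName: aws.String("MyVoiceTemplate"),
		VoiceTemplateRequest: &pinpoint.VoiceTemplateRequest{
			Body:         aws.String("Hello, this is a call from Amazon Pinpoint."), // assumed field name
			LanguageCode: aws.String("en-US"),                                       // assumed field name
			VoiceId:      aws.String("Joanna"),                                      // assumed field name
		},
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(resp)
	return nil
}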
+func (r *CreateVoiceTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/pinpoint/api_op_DeleteVoiceTemplate.go b/service/pinpoint/api_op_DeleteVoiceTemplate.go new file mode 100644 index 00000000000..6e0ce88c65c --- /dev/null +++ b/service/pinpoint/api_op_DeleteVoiceTemplate.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package pinpoint + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteVoiceTemplateInput struct { + _ struct{} `type:"structure"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVoiceTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVoiceTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteVoiceTemplateInput"} + + if s.TemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteVoiceTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.TemplateName != nil { + v := *s.TemplateName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "template-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteVoiceTemplateOutput struct { + _ struct{} `type:"structure" payload:"MessageBody"` + + // Provides information about an API request or response. + // + // MessageBody is a required field + MessageBody *MessageBody `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteVoiceTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteVoiceTemplateOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.MessageBody != nil { + v := s.MessageBody + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "MessageBody", v, metadata) + } + return nil +} + +const opDeleteVoiceTemplate = "DeleteVoiceTemplate" + +// DeleteVoiceTemplateRequest returns a request value for making API operation for +// Amazon Pinpoint. +// +// Deletes a message template that was designed for use in messages that were +// sent through the voice channel. +// +// // Example sending a request using DeleteVoiceTemplateRequest. 
+// req := client.DeleteVoiceTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/DeleteVoiceTemplate +func (c *Client) DeleteVoiceTemplateRequest(input *DeleteVoiceTemplateInput) DeleteVoiceTemplateRequest { + op := &aws.Operation{ + Name: opDeleteVoiceTemplate, + HTTPMethod: "DELETE", + HTTPPath: "/v1/templates/{template-name}/voice", + } + + if input == nil { + input = &DeleteVoiceTemplateInput{} + } + + req := c.newRequest(op, input, &DeleteVoiceTemplateOutput{}) + return DeleteVoiceTemplateRequest{Request: req, Input: input, Copy: c.DeleteVoiceTemplateRequest} +} + +// DeleteVoiceTemplateRequest is the request type for the +// DeleteVoiceTemplate API operation. +type DeleteVoiceTemplateRequest struct { + *aws.Request + Input *DeleteVoiceTemplateInput + Copy func(*DeleteVoiceTemplateInput) DeleteVoiceTemplateRequest +} + +// Send marshals and sends the DeleteVoiceTemplate API request. +func (r DeleteVoiceTemplateRequest) Send(ctx context.Context) (*DeleteVoiceTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteVoiceTemplateResponse{ + DeleteVoiceTemplateOutput: r.Request.Data.(*DeleteVoiceTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteVoiceTemplateResponse is the response type for the +// DeleteVoiceTemplate API operation. +type DeleteVoiceTemplateResponse struct { + *DeleteVoiceTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteVoiceTemplate request. +func (r *DeleteVoiceTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/pinpoint/api_op_GetVoiceTemplate.go b/service/pinpoint/api_op_GetVoiceTemplate.go new file mode 100644 index 00000000000..93bbc29671e --- /dev/null +++ b/service/pinpoint/api_op_GetVoiceTemplate.go @@ -0,0 +1,145 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package pinpoint + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type GetVoiceTemplateInput struct { + _ struct{} `type:"structure"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetVoiceTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetVoiceTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetVoiceTemplateInput"} + + if s.TemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetVoiceTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.TemplateName != nil { + v := *s.TemplateName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "template-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type GetVoiceTemplateOutput struct { + _ struct{} `type:"structure" payload:"VoiceTemplateResponse"` + + // Provides information about the content and settings for a message template + // that can be used in messages that are sent through the voice channel. + // + // VoiceTemplateResponse is a required field + VoiceTemplateResponse *VoiceTemplateResponse `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetVoiceTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetVoiceTemplateOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.VoiceTemplateResponse != nil { + v := s.VoiceTemplateResponse + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "VoiceTemplateResponse", v, metadata) + } + return nil +} + +const opGetVoiceTemplate = "GetVoiceTemplate" + +// GetVoiceTemplateRequest returns a request value for making API operation for +// Amazon Pinpoint. +// +// Retrieves the content and settings for a message template that you can use +// in messages that are sent through the voice channel. +// +// // Example sending a request using GetVoiceTemplateRequest. +// req := client.GetVoiceTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/GetVoiceTemplate +func (c *Client) GetVoiceTemplateRequest(input *GetVoiceTemplateInput) GetVoiceTemplateRequest { + op := &aws.Operation{ + Name: opGetVoiceTemplate, + HTTPMethod: "GET", + HTTPPath: "/v1/templates/{template-name}/voice", + } + + if input == nil { + input = &GetVoiceTemplateInput{} + } + + req := c.newRequest(op, input, &GetVoiceTemplateOutput{}) + return GetVoiceTemplateRequest{Request: req, Input: input, Copy: c.GetVoiceTemplateRequest} +} + +// GetVoiceTemplateRequest is the request type for the +// GetVoiceTemplate API operation. +type GetVoiceTemplateRequest struct { + *aws.Request + Input *GetVoiceTemplateInput + Copy func(*GetVoiceTemplateInput) GetVoiceTemplateRequest +} + +// Send marshals and sends the GetVoiceTemplate API request. +func (r GetVoiceTemplateRequest) Send(ctx context.Context) (*GetVoiceTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetVoiceTemplateResponse{ + GetVoiceTemplateOutput: r.Request.Data.(*GetVoiceTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetVoiceTemplateResponse is the response type for the +// GetVoiceTemplate API operation. +type GetVoiceTemplateResponse struct { + *GetVoiceTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetVoiceTemplate request. 
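// Editor's note: a short sketch combining the GetVoiceTemplate operation above
// with the DeleteVoiceTemplate operation earlier in this diff; not part of the
// generated code. The template name is a caller-supplied value and svc is a
// *pinpoint.Client as in the earlier sketch.
func inspectAndDeleteVoiceTemplate(ctx context.Context, svc *pinpoint.Client, name string) error {
	getResp, err := svc.GetVoiceTemplateRequest(&pinpoint.GetVoiceTemplateInput{
		TemplateName: aws.String(name),
	}).Send(ctx)
	if err != nil {
		return err
	}
	fmt.Println(getResp.VoiceTemplateResponse)

	_, err = svc.DeleteVoiceTemplateRequest(&pinpoint.DeleteVoiceTemplateInput{
		TemplateName: aws.String(name),
	}).Send(ctx)
	return err
}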
+func (r *GetVoiceTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/pinpoint/api_op_SendMessages.go b/service/pinpoint/api_op_SendMessages.go index e77fa793f5b..232f674a3d6 100644 --- a/service/pinpoint/api_op_SendMessages.go +++ b/service/pinpoint/api_op_SendMessages.go @@ -16,8 +16,7 @@ type SendMessagesInput struct { // ApplicationId is a required field ApplicationId *string `location:"uri" locationName:"application-id" type:"string" required:"true"` - // Specifies the objects that define configuration and other settings for a - // message. + // Specifies the configuration and other settings for a message. // // MessageRequest is a required field MessageRequest *MessageRequest `type:"structure" required:"true"` diff --git a/service/pinpoint/api_op_UpdateVoiceTemplate.go b/service/pinpoint/api_op_UpdateVoiceTemplate.go new file mode 100644 index 00000000000..7e059d379da --- /dev/null +++ b/service/pinpoint/api_op_UpdateVoiceTemplate.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package pinpoint + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateVoiceTemplateInput struct { + _ struct{} `type:"structure" payload:"VoiceTemplateRequest"` + + // TemplateName is a required field + TemplateName *string `location:"uri" locationName:"template-name" type:"string" required:"true"` + + // Specifies the content and settings for a message template that can be used + // in messages that are sent through the voice channel. + // + // VoiceTemplateRequest is a required field + VoiceTemplateRequest *VoiceTemplateRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateVoiceTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateVoiceTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateVoiceTemplateInput"} + + if s.TemplateName == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateName")) + } + + if s.VoiceTemplateRequest == nil { + invalidParams.Add(aws.NewErrParamRequired("VoiceTemplateRequest")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateVoiceTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.TemplateName != nil { + v := *s.TemplateName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "template-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VoiceTemplateRequest != nil { + v := s.VoiceTemplateRequest + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "VoiceTemplateRequest", v, metadata) + } + return nil +} + +type UpdateVoiceTemplateOutput struct { + _ struct{} `type:"structure" payload:"MessageBody"` + + // Provides information about an API request or response. 
+ // + // MessageBody is a required field + MessageBody *MessageBody `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateVoiceTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateVoiceTemplateOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.MessageBody != nil { + v := s.MessageBody + + metadata := protocol.Metadata{} + e.SetFields(protocol.PayloadTarget, "MessageBody", v, metadata) + } + return nil +} + +const opUpdateVoiceTemplate = "UpdateVoiceTemplate" + +// UpdateVoiceTemplateRequest returns a request value for making API operation for +// Amazon Pinpoint. +// +// Updates an existing message template that you can use in messages that are +// sent through the voice channel. +// +// // Example sending a request using UpdateVoiceTemplateRequest. +// req := client.UpdateVoiceTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateVoiceTemplate +func (c *Client) UpdateVoiceTemplateRequest(input *UpdateVoiceTemplateInput) UpdateVoiceTemplateRequest { + op := &aws.Operation{ + Name: opUpdateVoiceTemplate, + HTTPMethod: "PUT", + HTTPPath: "/v1/templates/{template-name}/voice", + } + + if input == nil { + input = &UpdateVoiceTemplateInput{} + } + + req := c.newRequest(op, input, &UpdateVoiceTemplateOutput{}) + return UpdateVoiceTemplateRequest{Request: req, Input: input, Copy: c.UpdateVoiceTemplateRequest} +} + +// UpdateVoiceTemplateRequest is the request type for the +// UpdateVoiceTemplate API operation. +type UpdateVoiceTemplateRequest struct { + *aws.Request + Input *UpdateVoiceTemplateInput + Copy func(*UpdateVoiceTemplateInput) UpdateVoiceTemplateRequest +} + +// Send marshals and sends the UpdateVoiceTemplate API request. +func (r UpdateVoiceTemplateRequest) Send(ctx context.Context) (*UpdateVoiceTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateVoiceTemplateResponse{ + UpdateVoiceTemplateOutput: r.Request.Data.(*UpdateVoiceTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateVoiceTemplateResponse is the response type for the +// UpdateVoiceTemplate API operation. +type UpdateVoiceTemplateResponse struct { + *UpdateVoiceTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateVoiceTemplate request. +func (r *UpdateVoiceTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/pinpoint/api_types.go b/service/pinpoint/api_types.go index ffac32a5640..a2fe991a34c 100644 --- a/service/pinpoint/api_types.go +++ b/service/pinpoint/api_types.go @@ -242,7 +242,7 @@ type ADMMessage struct { MD5 *string `type:"string"` // The raw, JSON-formatted string to use as the payload for the notification - // message. This value overrides the message. + // message. If specified, this value overrides all other content for the message. RawContent *string `type:"string"` // Specifies whether the notification is a silent push notification, which is @@ -720,7 +720,7 @@ type APNSMessage struct { Priority *string `type:"string"` // The raw, JSON-formatted string to use as the payload for the notification - // message. 
This value overrides all other content for the message. + // message. If specified, this value overrides all other content for the message. // // If you specify the raw content of an APNs push notification, the message // payload has to include the content-available key. The value of the content-available @@ -955,6 +955,11 @@ type APNSPushNotificationTemplate struct { // on the message template. MediaUrl *string `type:"string"` + // The raw, JSON-formatted string to use as the payload for push notifications + // that are based on the message template. If specified, this value overrides + // all other content for the message template. + RawContent *string `type:"string"` + // The key for the sound to play when the recipient receives a push notification // that's based on the message template. The value for this key is the name // of a sound file in your app's main bundle or the Library/Sounds folder in @@ -997,6 +1002,12 @@ func (s APNSPushNotificationTemplate) MarshalFields(e protocol.FieldEncoder) err metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "MediaUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.RawContent != nil { + v := *s.RawContent + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RawContent", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.Sound != nil { v := *s.Sound @@ -2018,13 +2029,13 @@ type AddressConfiguration struct { // to email/SMS delivery receipt event attributes. Context map[string]string `type:"map"` - // The raw, JSON-formatted string to use as the payload for the notification - // message. This value overrides the message. + // The raw, JSON-formatted string to use as the payload for the message. If + // specified, this value overrides all other values for the message. RawContent *string `type:"string"` - // An object that maps variable values for the message. Amazon Pinpoint merges - // these values with the variable values specified by properties of the DefaultMessage - // object. The substitutions in this map take precedence over all other substitutions. + // A map of the message variables to merge with the variables specified by properties + // of the DefaultMessage object. The variables specified in this map take precedence + // over all other variables. Substitutions map[string][]string `type:"map"` // The message title to use instead of the default message title. This value @@ -2128,6 +2139,11 @@ type AndroidPushNotificationTemplate struct { // message template. ImageUrl *string `type:"string"` + // The raw, JSON-formatted string to use as the payload for a push notification + // that's based on the message template. If specified, this value overrides + // all other content for the message template. + RawContent *string `type:"string"` + // The URL of the small icon image to display in the status bar and the content // view of a push notification that's based on the message template. 
SmallImageIconUrl *string `type:"string"` @@ -2179,6 +2195,12 @@ func (s AndroidPushNotificationTemplate) MarshalFields(e protocol.FieldEncoder) metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "ImageUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.RawContent != nil { + v := *s.RawContent + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RawContent", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.SmallImageIconUrl != nil { v := *s.SmallImageIconUrl @@ -2830,7 +2852,7 @@ type BaiduMessage struct { ImageUrl *string `type:"string"` // The raw, JSON-formatted string to use as the payload for the notification - // message. This value overrides the message. + // message. If specified, this value overrides all other content for the message. RawContent *string `type:"string"` // Specifies whether the notification is a silent push notification, which is @@ -3112,7 +3134,7 @@ func (s CampaignDateRangeKpiResponse) MarshalFields(e protocol.FieldEncoder) err type CampaignEmailMessage struct { _ struct{} `type:"structure"` - // The body of the email for recipients whose email clients don't support HTML + // The body of the email for recipients whose email clients don't render HTML // content. Body *string `type:"string"` @@ -3121,7 +3143,7 @@ type CampaignEmailMessage struct { FromAddress *string `type:"string"` // The body of the email, in HTML format, for recipients whose email clients - // support HTML content. + // render HTML content. HtmlBody *string `type:"string"` // The subject line, or title, of the email. @@ -4077,16 +4099,15 @@ func (s CreateTemplateMessageBody) MarshalFields(e protocol.FieldEncoder) error return nil } -// Specifies the default message to use for all channels. +// Specifies the default message for all channels. type DefaultMessage struct { _ struct{} `type:"structure"` - // The default message body of the push notification, email, or SMS message. + // The default body of the message. Body *string `type:"string"` - // The default message variables to use in the push notification, email, or - // SMS message. You can override these default variables with individual address - // variables. + // The default message variables to use in the message. You can override these + // default variables with individual address variables. Substitutions map[string][]string `type:"map"` } @@ -4340,7 +4361,7 @@ type DirectMessageConfiguration struct { // This message overrides the default push notification message (DefaultPushNotificationMessage). BaiduMessage *BaiduMessage `type:"structure"` - // The default message body for all channels. + // The default message for all channels. DefaultMessage *DefaultMessage `type:"structure"` // The default push notification message for all push notification channels. @@ -4826,10 +4847,18 @@ func (s EmailMessageActivity) MarshalFields(e protocol.FieldEncoder) error { type EmailTemplateRequest struct { _ struct{} `type:"structure"` + // A JSON object that specifies the default values to use for message variables + // in the message template. This object is a set of key-value pairs. Each key + // defines a message variable in the template. The corresponding value defines + // the default value for that variable. When you create a message that's based + // on the template, you can override these defaults with message-specific and + // address-specific variables and values. 
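// Editor's note: an illustrative template request exercising the
// DefaultSubstitutions and TemplateDescription fields added in this hunk; not
// part of the generated diff. The {{FirstName}} placeholder syntax and the
// operation that would accept this value (for example CreateEmailTemplateRequest)
// are assumptions, not taken from this diff.
func newWelcomeEmailTemplate() *pinpoint.EmailTemplateRequest {
	return &pinpoint.EmailTemplateRequest{
		// Each key in this JSON object names a message variable; its value is the
		// default used when a message doesn't supply its own substitution.
		DefaultSubstitutions: aws.String(`{"FirstName":"there"}`),
		HtmlPart:             aws.String("<p>Hello {{FirstName}}, thanks for signing up!</p>"),
		TextPart:             aws.String("Hello {{FirstName}}, thanks for signing up!"),
		TemplateDescription:  aws.String("Welcome email with a first-name substitution"),
	}
}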
+ DefaultSubstitutions *string `type:"string"` + // The message body, in HTML format, to use in email messages that are based // on the message template. We recommend using HTML format for email clients - // that support HTML. You can include links, formatted text, and more in an - // HTML message. + // that render HTML content. You can include links, formatted text, and more + // in an HTML message. HtmlPart *string `type:"string"` // The subject line, or title, to use in email messages that are based on the @@ -4841,10 +4870,13 @@ type EmailTemplateRequest struct { // associated tag value. Tags map[string]string `locationName:"tags" type:"map"` - // The message body, in text format, to use in email messages that are based - // on the message template. We recommend using text format for email clients - // that don't support HTML and clients that are connected to high-latency networks, - // such as mobile devices. + // A custom description of the message template. + TemplateDescription *string `type:"string"` + + // The message body, in plain text format, to use in email messages that are + // based on the message template. We recommend using plain text format for email + // clients that don't render HTML content and clients that are connected to + // high-latency networks, such as mobile devices. TextPart *string `type:"string"` } @@ -4855,6 +4887,12 @@ func (s EmailTemplateRequest) String() string { // MarshalFields encodes the AWS API shape using the passed in protocol encoder. func (s EmailTemplateRequest) MarshalFields(e protocol.FieldEncoder) error { + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.HtmlPart != nil { v := *s.HtmlPart @@ -4879,6 +4917,12 @@ func (s EmailTemplateRequest) MarshalFields(e protocol.FieldEncoder) error { ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.TextPart != nil { v := *s.TextPart @@ -4901,6 +4945,12 @@ type EmailTemplateResponse struct { // CreationDate is a required field CreationDate *string `type:"string" required:"true"` + // The JSON object that specifies the default values that are used for message + // variables in the message template. This object is a set of key-value pairs. + // Each key defines a message variable in the template. The corresponding value + // defines the default value for that variable. + DefaultSubstitutions *string `type:"string"` + // The message body, in HTML format, that's used in email messages that are // based on the message template. HtmlPart *string `type:"string"` @@ -4919,6 +4969,9 @@ type EmailTemplateResponse struct { // key and an associated tag value. Tags map[string]string `locationName:"tags" type:"map"` + // The custom description of the message template. + TemplateDescription *string `type:"string"` + // The name of the message template. // // TemplateName is a required field @@ -4930,8 +4983,8 @@ type EmailTemplateResponse struct { // TemplateType is a required field TemplateType TemplateType `type:"string" required:"true" enum:"true"` - // The message body, in text format, that's used in email messages that are - // based on the message template. 
+ // The message body, in plain text format, that's used in email messages that + // are based on the message template. TextPart *string `type:"string"` } @@ -4954,6 +5007,12 @@ func (s EmailTemplateResponse) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "CreationDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.HtmlPart != nil { v := *s.HtmlPart @@ -4984,6 +5043,12 @@ func (s EmailTemplateResponse) MarshalFields(e protocol.FieldEncoder) error { ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.TemplateName != nil { v := *s.TemplateName @@ -5888,7 +5953,7 @@ type EndpointSendConfiguration struct { Context map[string]string `type:"map"` // The raw, JSON-formatted string to use as the payload for the message. If - // specified, this value overrides the message. + // specified, this value overrides all other values for the message. RawContent *string `type:"string"` // A map of the message variables to merge with the variables specified for @@ -7209,7 +7274,7 @@ type GCMMessage struct { Priority *string `type:"string"` // The raw, JSON-formatted string to use as the payload for the notification - // message. This value overrides the message. + // message. If specified, this value overrides all other content for the message. RawContent *string `type:"string"` // The package name of the application where registration tokens must match @@ -8775,7 +8840,7 @@ type Message struct { MediaUrl *string `type:"string"` // The raw, JSON-formatted string to use as the payload for the notification - // message. This value overrides other values for the message. + // message. If specified, this value overrides all other content for the message. RawContent *string `type:"string"` // Specifies whether the notification is a silent push notification, which is @@ -9019,8 +9084,7 @@ func (s MessageConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies the objects that define configuration and other settings for a -// message. +// Specifies the configuration and other settings for a message. type MessageRequest struct { _ struct{} `type:"structure"` @@ -9042,7 +9106,8 @@ type MessageRequest struct { // as content overrides and message variables. Endpoints map[string]EndpointSendConfiguration `type:"map"` - // The set of properties that defines the configuration settings for the message. + // The settings and content for the default message and any default messages + // that you defined for specific channels. // // MessageConfiguration is a required field MessageConfiguration *DirectMessageConfiguration `type:"structure" required:"true"` @@ -9829,6 +9894,14 @@ type PushNotificationTemplateRequest struct { // The default message template to use for push notification channels. Default *DefaultPushNotificationTemplate `type:"structure"` + // A JSON object that specifies the default values to use for message variables + // in the message template. This object is a set of key-value pairs. Each key + // defines a message variable in the template. 
The corresponding value defines + // the default value for that variable. When you create a message that's based + // on the template, you can override these defaults with message-specific and + // address-specific variables and values. + DefaultSubstitutions *string `type:"string"` + // The message template to use for the GCM channel, which is used to send notifications // through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging // (GCM), service. This message template overrides the default template for @@ -9839,6 +9912,9 @@ type PushNotificationTemplateRequest struct { // with the message template. Each tag consists of a required tag key and an // associated tag value. Tags map[string]string `locationName:"tags" type:"map"` + + // A custom description of the message template. + TemplateDescription *string `type:"string"` } // String returns the string representation @@ -9872,6 +9948,12 @@ func (s PushNotificationTemplateRequest) MarshalFields(e protocol.FieldEncoder) metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "Default", v, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.GCM != nil { v := s.GCM @@ -9890,6 +9972,12 @@ func (s PushNotificationTemplateRequest) MarshalFields(e protocol.FieldEncoder) ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } return nil } @@ -9924,6 +10012,12 @@ type PushNotificationTemplateResponse struct { // The default message template that's used for push notification channels. Default *DefaultPushNotificationTemplate `type:"structure"` + // The JSON object that specifies the default values that are used for message + // variables in the message template. This object is a set of key-value pairs. + // Each key defines a message variable in the template. The corresponding value + // defines the default value for that variable. + DefaultSubstitutions *string `type:"string"` + // The message template that's used for the GCM channel, which is used to send // notifications through the Firebase Cloud Messaging (FCM), formerly Google // Cloud Messaging (GCM), service. This message template overrides the default @@ -9940,6 +10034,9 @@ type PushNotificationTemplateResponse struct { // key and an associated tag value. Tags map[string]string `locationName:"tags" type:"map"` + // The custom description of the message template. + TemplateDescription *string `type:"string"` + // The name of the message template. 
// // TemplateName is a required field @@ -9995,6 +10092,12 @@ func (s PushNotificationTemplateResponse) MarshalFields(e protocol.FieldEncoder) metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "Default", v, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.GCM != nil { v := s.GCM @@ -10019,6 +10122,12 @@ func (s PushNotificationTemplateResponse) MarshalFields(e protocol.FieldEncoder) ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.TemplateName != nil { v := *s.TemplateName @@ -10618,10 +10727,21 @@ type SMSTemplateRequest struct { // The message body to use in text messages that are based on the message template. Body *string `type:"string"` + // A JSON object that specifies the default values to use for message variables + // in the message template. This object is a set of key-value pairs. Each key + // defines a message variable in the template. The corresponding value defines + // the default value for that variable. When you create a message that's based + // on the template, you can override these defaults with message-specific and + // address-specific variables and values. + DefaultSubstitutions *string `type:"string"` + // A string-to-string map of key-value pairs that defines the tags to associate // with the message template. Each tag consists of a required tag key and an // associated tag value. Tags map[string]string `locationName:"tags" type:"map"` + + // A custom description of the message template. + TemplateDescription *string `type:"string"` } // String returns the string representation @@ -10637,6 +10757,12 @@ func (s SMSTemplateRequest) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "Body", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.Tags != nil { v := s.Tags @@ -10649,6 +10775,12 @@ func (s SMSTemplateRequest) MarshalFields(e protocol.FieldEncoder) error { ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } return nil } @@ -10669,6 +10801,12 @@ type SMSTemplateResponse struct { // CreationDate is a required field CreationDate *string `type:"string" required:"true"` + // The JSON object that specifies the default values that are used for message + // variables in the message template. This object is a set of key-value pairs. + // Each key defines a message variable in the template. The corresponding value + // defines the default value for that variable. + DefaultSubstitutions *string `type:"string"` + // The date when the message template was last modified. // // LastModifiedDate is a required field @@ -10679,6 +10817,9 @@ type SMSTemplateResponse struct { // key and an associated tag value. 
Tags map[string]string `locationName:"tags" type:"map"` + // The custom description of the message template. + TemplateDescription *string `type:"string"` + // The name of the message template. // // TemplateName is a required field @@ -10716,6 +10857,12 @@ func (s SMSTemplateResponse) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "CreationDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.LastModifiedDate != nil { v := *s.LastModifiedDate @@ -10734,6 +10881,12 @@ func (s SMSTemplateResponse) MarshalFields(e protocol.FieldEncoder) error { ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.TemplateName != nil { v := *s.TemplateName @@ -11762,7 +11915,7 @@ type SendUsersMessageRequest struct { // it generates for users-messages deliveries. Context map[string]string `type:"map"` - // The message definitions for the default message and any default messages + // The settings and content for the default message and any default messages // that you defined for specific channels. // // MessageConfiguration is a required field @@ -12115,17 +12268,17 @@ func (s SimpleCondition) MarshalFields(e protocol.FieldEncoder) error { type SimpleEmail struct { _ struct{} `type:"structure"` - // The body of the email message, in HTML format. We recommend using an HTML - // part for email clients that support HTML. You can include links, formatted + // The body of the email message, in HTML format. We recommend using HTML format + // for email clients that render HTML content. You can include links, formatted // text, and more in an HTML message. HtmlPart *SimpleEmailPart `type:"structure"` // The subject line, or title, of the email. Subject *SimpleEmailPart `type:"structure"` - // The body of the email message, in text format. We recommend using a text - // part for email clients that don't support HTML and clients that are connected - // to high-latency networks, such as mobile devices. + // The body of the email message, in plain text format. We recommend using plain + // text format for email clients that don't render HTML content and clients + // that are connected to high-latency networks, such as mobile devices. TextPart *SimpleEmailPart `type:"structure"` } @@ -12319,7 +12472,7 @@ func (s Template) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies the message template for each type of channel. +// Specifies the message template to use for the message, for each type of channel. type TemplateConfiguration struct { _ struct{} `type:"structure"` @@ -12331,6 +12484,9 @@ type TemplateConfiguration struct { // The SMS template to use for the message. SMSTemplate *Template `type:"structure"` + + // The voice template to use for the message. 
+ VoiceTemplate *Template `type:"structure"` } // String returns the string representation @@ -12358,6 +12514,12 @@ func (s TemplateConfiguration) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "SMSTemplate", v, metadata) } + if s.VoiceTemplate != nil { + v := s.VoiceTemplate + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VoiceTemplate", v, metadata) + } return nil } @@ -12374,6 +12536,12 @@ type TemplateResponse struct { // CreationDate is a required field CreationDate *string `type:"string" required:"true"` + // The JSON object that specifies the default values that are used for message + // variables in the message template. This object is a set of key-value pairs. + // Each key defines a message variable in the template. The corresponding value + // defines the default value for that variable. + DefaultSubstitutions *string `type:"string"` + // The date when the message template was last modified. // // LastModifiedDate is a required field @@ -12384,6 +12552,9 @@ type TemplateResponse struct { // key and an associated tag value. Tags map[string]string `locationName:"tags" type:"map"` + // The custom description of the message template. + TemplateDescription *string `type:"string"` + // The name of the message template. // // TemplateName is a required field @@ -12414,6 +12585,12 @@ func (s TemplateResponse) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "CreationDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.LastModifiedDate != nil { v := *s.LastModifiedDate @@ -12432,6 +12609,12 @@ func (s TemplateResponse) MarshalFields(e protocol.FieldEncoder) error { ms0.End() } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.TemplateName != nil { v := *s.TemplateName @@ -12760,11 +12943,12 @@ func (s VoiceChannelResponse) MarshalFields(e protocol.FieldEncoder) error { type VoiceMessage struct { _ struct{} `type:"structure"` - // The text script for the voice message. + // The text of the script to use for the voice message. Body *string `type:"string"` - // The language to use when delivering the message. For a list of supported - // languages, see the Amazon Polly Developer Guide (AmazonPollyDG.html). + // The code for the language to use when synthesizing the text of the message + // script. For a list of supported languages and the code for each one, see + // the Amazon Polly Developer Guide (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). LanguageCode *string `type:"string"` // The long code to send the voice message from. This value should be one of @@ -12778,7 +12962,7 @@ type VoiceMessage struct { Substitutions map[string][]string `type:"map"` // The name of the voice to use when delivering the message. For a list of supported - // voices, see the Amazon Polly Developer Guide (AmazonPollyDG.html). + // voices, see the Amazon Polly Developer Guide (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). 
VoiceId *string `type:"string"` } @@ -12833,6 +13017,236 @@ func (s VoiceMessage) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specifies the content and settings for a message template that can be used +// in messages that are sent through the voice channel. +type VoiceTemplateRequest struct { + _ struct{} `type:"structure"` + + // The text of the script to use in messages that are based on the message template, + // in plain text format. + Body *string `type:"string"` + + // A JSON object that specifies the default values to use for message variables + // in the message template. This object is a set of key-value pairs. Each key + // defines a message variable in the template. The corresponding value defines + // the default value for that variable. When you create a message that's based + // on the template, you can override these defaults with message-specific and + // address-specific variables and values. + DefaultSubstitutions *string `type:"string"` + + // The code for the language to use when synthesizing the text of the script + // in messages that are based on the message template. For a list of supported + // languages and the code for each one, see the Amazon Polly Developer Guide + // (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). + LanguageCode *string `type:"string"` + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]string `locationName:"tags" type:"map"` + + // A custom description of the message template. + TemplateDescription *string `type:"string"` + + // The name of the voice to use when delivering messages that are based on the + // message template. For a list of supported voices, see the Amazon Polly Developer + // Guide (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). + VoiceId *string `type:"string"` +} + +// String returns the string representation +func (s VoiceTemplateRequest) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s VoiceTemplateRequest) MarshalFields(e protocol.FieldEncoder) error { + if s.Body != nil { + v := *s.Body + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Body", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.LanguageCode != nil { + v := *s.LanguageCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VoiceId != nil { + v := *s.VoiceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VoiceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Provides information about the content and settings for a message template +// that can be used in messages that are sent through the voice channel. +type VoiceTemplateResponse struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the message template. + Arn *string `type:"string"` + + // The text of the script that's used in messages that are based on the message + // template, in plain text format. + Body *string `type:"string"` + + // The date when the message template was created. + // + // CreationDate is a required field + CreationDate *string `type:"string" required:"true"` + + // The JSON object that specifies the default values that are used for message + // variables in the message template. This object is a set of key-value pairs. + // Each key defines a message variable in the template. The corresponding value + // defines the default value for that variable. + DefaultSubstitutions *string `type:"string"` + + // The code for the language that's used when synthesizing the text of the script + // in messages that are based on the message template. For a list of supported + // languages and the code for each one, see the Amazon Polly Developer Guide + // (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). + LanguageCode *string `type:"string"` + + // The date when the message template was last modified. + // + // LastModifiedDate is a required field + LastModifiedDate *string `type:"string" required:"true"` + + // A string-to-string map of key-value pairs that identifies the tags that are + // associated with the message template. Each tag consists of a required tag + // key and an associated tag value. + Tags map[string]string `locationName:"tags" type:"map"` + + // The custom description of the message template. + TemplateDescription *string `type:"string"` + + // The name of the message template. + // + // TemplateName is a required field + TemplateName *string `type:"string" required:"true"` + + // The type of channel that the message template is designed for. For a voice + // template, this value is VOICE. 
+ // + // TemplateType is a required field + TemplateType TemplateType `type:"string" required:"true" enum:"true"` + + // The name of the voice that's used when delivering messages that are based + // on the message template. For a list of supported voices, see the Amazon Polly + // Developer Guide (https://docs.aws.amazon.com/polly/latest/dg/what-is.html). + VoiceId *string `type:"string"` +} + +// String returns the string representation +func (s VoiceTemplateResponse) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s VoiceTemplateResponse) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Body != nil { + v := *s.Body + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Body", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreationDate != nil { + v := *s.CreationDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreationDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DefaultSubstitutions != nil { + v := *s.DefaultSubstitutions + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultSubstitutions", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.LanguageCode != nil { + v := *s.LanguageCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LanguageCode", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.LastModifiedDate != nil { + v := *s.LastModifiedDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastModifiedDate", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "tags", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetValue(k1, protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ms0.End() + + } + if s.TemplateDescription != nil { + v := *s.TemplateDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateName != nil { + v := *s.TemplateName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.TemplateType) > 0 { + v := s.TemplateType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VoiceId != nil { + v := *s.VoiceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VoiceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + // Specifies the settings for a wait activity in a journey. This type of activity // waits for a certain amount of time or until a specific date and time before // moving participants to the next activity in a journey. 
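The VoiceTemplateRequest and VoiceTemplateResponse shapes above carry the same DefaultSubstitutions and TemplateDescription fields that this diff adds to the push and SMS template shapes earlier. A minimal usage sketch follows, feeding the request shape into the CreateVoiceTemplateRequest operation that the interface changes below add to the Pinpoint client; the template name, the {{FirstName}} variable, and the exact members of CreateVoiceTemplateInput are illustrative assumptions rather than content of this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := pinpoint.New(cfg)

	// Body references a {{FirstName}} message variable; DefaultSubstitutions
	// supplies its default value as a JSON object, per the field docs above.
	tmpl := &pinpoint.VoiceTemplateRequest{
		Body:                 aws.String("Hello {{FirstName}}, your order has shipped."),
		DefaultSubstitutions: aws.String(`{"FirstName":"there"}`),
		LanguageCode:         aws.String("en-US"),
		TemplateDescription:  aws.String("Order-shipped voice notification"),
		VoiceId:              aws.String("Joanna"), // an Amazon Polly voice
	}

	// Assumed input shape: a TemplateName plus the request body built above.
	req := client.CreateVoiceTemplateRequest(&pinpoint.CreateVoiceTemplateInput{
		TemplateName:         aws.String("OrderShippedVoice"),
		VoiceTemplateRequest: tmpl,
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}

Once created, the template can be referenced from the new VoiceTemplate member of TemplateConfiguration when sending messages.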
@@ -12876,7 +13290,7 @@ func (s WaitActivity) MarshalFields(e protocol.FieldEncoder) error { type WaitTime struct { _ struct{} `type:"structure"` - // The amount of time, as a duration in ISO 8601 format, to wait before determining + // The amount of time to wait, as a duration in ISO 8601 format, before determining // whether the activity's conditions have been met or moving participants to // the next activity in the journey. WaitFor *string `type:"string"` @@ -12993,7 +13407,7 @@ type WriteCampaignRequest struct { // in addition to the default treatment for the campaign. AdditionalTreatments []WriteTreatmentResource `type:"list"` - // The custom description of the campaign. + // A custom description of the campaign. Description *string `type:"string"` // The allocated percentage of users (segment members) who shouldn't receive @@ -13033,7 +13447,7 @@ type WriteCampaignRequest struct { // The message template to use for the campaign. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // The custom description of a variation of the campaign to use for A/B testing. + // A custom description of a variation of the campaign to use for A/B testing. TreatmentDescription *string `type:"string"` // The custom name of a variation of the campaign to use for A/B testing. @@ -13534,7 +13948,7 @@ type WriteTreatmentResource struct { // The message template to use for the treatment. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // The custom description of the treatment. + // A custom description of the treatment. TreatmentDescription *string `type:"string"` // The custom name of the treatment. A treatment is a variation of a campaign diff --git a/service/pinpoint/pinpointiface/interface.go b/service/pinpoint/pinpointiface/interface.go index cd5e0af422d..5f874da8c5d 100644 --- a/service/pinpoint/pinpointiface/interface.go +++ b/service/pinpoint/pinpointiface/interface.go @@ -79,6 +79,8 @@ type ClientAPI interface { CreateSmsTemplateRequest(*pinpoint.CreateSmsTemplateInput) pinpoint.CreateSmsTemplateRequest + CreateVoiceTemplateRequest(*pinpoint.CreateVoiceTemplateInput) pinpoint.CreateVoiceTemplateRequest + DeleteAdmChannelRequest(*pinpoint.DeleteAdmChannelInput) pinpoint.DeleteAdmChannelRequest DeleteApnsChannelRequest(*pinpoint.DeleteApnsChannelInput) pinpoint.DeleteApnsChannelRequest @@ -119,6 +121,8 @@ type ClientAPI interface { DeleteVoiceChannelRequest(*pinpoint.DeleteVoiceChannelInput) pinpoint.DeleteVoiceChannelRequest + DeleteVoiceTemplateRequest(*pinpoint.DeleteVoiceTemplateInput) pinpoint.DeleteVoiceTemplateRequest + GetAdmChannelRequest(*pinpoint.GetAdmChannelInput) pinpoint.GetAdmChannelRequest GetApnsChannelRequest(*pinpoint.GetApnsChannelInput) pinpoint.GetApnsChannelRequest @@ -201,6 +205,8 @@ type ClientAPI interface { GetVoiceChannelRequest(*pinpoint.GetVoiceChannelInput) pinpoint.GetVoiceChannelRequest + GetVoiceTemplateRequest(*pinpoint.GetVoiceTemplateInput) pinpoint.GetVoiceTemplateRequest + ListJourneysRequest(*pinpoint.ListJourneysInput) pinpoint.ListJourneysRequest ListTagsForResourceRequest(*pinpoint.ListTagsForResourceInput) pinpoint.ListTagsForResourceRequest @@ -262,6 +268,8 @@ type ClientAPI interface { UpdateSmsTemplateRequest(*pinpoint.UpdateSmsTemplateInput) pinpoint.UpdateSmsTemplateRequest UpdateVoiceChannelRequest(*pinpoint.UpdateVoiceChannelInput) pinpoint.UpdateVoiceChannelRequest + + UpdateVoiceTemplateRequest(*pinpoint.UpdateVoiceTemplateInput) pinpoint.UpdateVoiceTemplateRequest } var _ ClientAPI = (*pinpoint.Client)(nil) diff 
--git a/service/quicksight/api_enums.go b/service/quicksight/api_enums.go index ae16778eec2..62d7e6dd43c 100644 --- a/service/quicksight/api_enums.go +++ b/service/quicksight/api_enums.go @@ -2,17 +2,187 @@ package quicksight +type AssignmentStatus string + +// Enum values for AssignmentStatus +const ( + AssignmentStatusEnabled AssignmentStatus = "ENABLED" + AssignmentStatusDraft AssignmentStatus = "DRAFT" + AssignmentStatusDisabled AssignmentStatus = "DISABLED" +) + +func (enum AssignmentStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AssignmentStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ColumnDataType string + +// Enum values for ColumnDataType +const ( + ColumnDataTypeString ColumnDataType = "STRING" + ColumnDataTypeInteger ColumnDataType = "INTEGER" + ColumnDataTypeDecimal ColumnDataType = "DECIMAL" + ColumnDataTypeDatetime ColumnDataType = "DATETIME" +) + +func (enum ColumnDataType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ColumnDataType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DashboardBehavior string + +// Enum values for DashboardBehavior +const ( + DashboardBehaviorEnabled DashboardBehavior = "ENABLED" + DashboardBehaviorDisabled DashboardBehavior = "DISABLED" +) + +func (enum DashboardBehavior) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DashboardBehavior) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DashboardErrorType string + +// Enum values for DashboardErrorType +const ( + DashboardErrorTypeDataSetNotFound DashboardErrorType = "DATA_SET_NOT_FOUND" + DashboardErrorTypeInternalFailure DashboardErrorType = "INTERNAL_FAILURE" + DashboardErrorTypeParameterValueIncompatible DashboardErrorType = "PARAMETER_VALUE_INCOMPATIBLE" + DashboardErrorTypeParameterTypeInvalid DashboardErrorType = "PARAMETER_TYPE_INVALID" + DashboardErrorTypeParameterNotFound DashboardErrorType = "PARAMETER_NOT_FOUND" + DashboardErrorTypeColumnTypeMismatch DashboardErrorType = "COLUMN_TYPE_MISMATCH" + DashboardErrorTypeColumnGeographicRoleMismatch DashboardErrorType = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" + DashboardErrorTypeColumnReplacementMissing DashboardErrorType = "COLUMN_REPLACEMENT_MISSING" +) + +func (enum DashboardErrorType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DashboardErrorType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DashboardUIState string + +// Enum values for DashboardUIState +const ( + DashboardUIStateExpanded DashboardUIState = "EXPANDED" + DashboardUIStateCollapsed DashboardUIState = "COLLAPSED" +) + +func (enum DashboardUIState) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DashboardUIState) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DataSetImportMode string + +// Enum values for DataSetImportMode +const ( + DataSetImportModeSpice DataSetImportMode = "SPICE" + DataSetImportModeDirectQuery DataSetImportMode = "DIRECT_QUERY" +) + +func (enum DataSetImportMode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DataSetImportMode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DataSourceErrorInfoType string + +// Enum values for 
DataSourceErrorInfoType +const ( + DataSourceErrorInfoTypeTimeout DataSourceErrorInfoType = "TIMEOUT" + DataSourceErrorInfoTypeEngineVersionNotSupported DataSourceErrorInfoType = "ENGINE_VERSION_NOT_SUPPORTED" + DataSourceErrorInfoTypeUnknownHost DataSourceErrorInfoType = "UNKNOWN_HOST" + DataSourceErrorInfoTypeGenericSqlFailure DataSourceErrorInfoType = "GENERIC_SQL_FAILURE" + DataSourceErrorInfoTypeConflict DataSourceErrorInfoType = "CONFLICT" + DataSourceErrorInfoTypeUnknown DataSourceErrorInfoType = "UNKNOWN" +) + +func (enum DataSourceErrorInfoType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DataSourceErrorInfoType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DataSourceType string + +// Enum values for DataSourceType +const ( + DataSourceTypeAdobeAnalytics DataSourceType = "ADOBE_ANALYTICS" + DataSourceTypeAmazonElasticsearch DataSourceType = "AMAZON_ELASTICSEARCH" + DataSourceTypeAthena DataSourceType = "ATHENA" + DataSourceTypeAurora DataSourceType = "AURORA" + DataSourceTypeAuroraPostgresql DataSourceType = "AURORA_POSTGRESQL" + DataSourceTypeAwsIotAnalytics DataSourceType = "AWS_IOT_ANALYTICS" + DataSourceTypeGithub DataSourceType = "GITHUB" + DataSourceTypeJira DataSourceType = "JIRA" + DataSourceTypeMariadb DataSourceType = "MARIADB" + DataSourceTypeMysql DataSourceType = "MYSQL" + DataSourceTypePostgresql DataSourceType = "POSTGRESQL" + DataSourceTypePresto DataSourceType = "PRESTO" + DataSourceTypeRedshift DataSourceType = "REDSHIFT" + DataSourceTypeS3 DataSourceType = "S3" + DataSourceTypeSalesforce DataSourceType = "SALESFORCE" + DataSourceTypeServicenow DataSourceType = "SERVICENOW" + DataSourceTypeSnowflake DataSourceType = "SNOWFLAKE" + DataSourceTypeSpark DataSourceType = "SPARK" + DataSourceTypeSqlserver DataSourceType = "SQLSERVER" + DataSourceTypeTeradata DataSourceType = "TERADATA" + DataSourceTypeTwitter DataSourceType = "TWITTER" +) + +func (enum DataSourceType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DataSourceType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ExceptionResourceType string // Enum values for ExceptionResourceType const ( - ExceptionResourceTypeUser ExceptionResourceType = "USER" - ExceptionResourceTypeGroup ExceptionResourceType = "GROUP" - ExceptionResourceTypeNamespace ExceptionResourceType = "NAMESPACE" - ExceptionResourceTypeDataSource ExceptionResourceType = "DATA_SOURCE" - ExceptionResourceTypeDataSet ExceptionResourceType = "DATA_SET" - ExceptionResourceTypeVpcConnection ExceptionResourceType = "VPC_CONNECTION" - ExceptionResourceTypeIngestion ExceptionResourceType = "INGESTION" + ExceptionResourceTypeUser ExceptionResourceType = "USER" + ExceptionResourceTypeGroup ExceptionResourceType = "GROUP" + ExceptionResourceTypeNamespace ExceptionResourceType = "NAMESPACE" + ExceptionResourceTypeAccountSettings ExceptionResourceType = "ACCOUNT_SETTINGS" + ExceptionResourceTypeIampolicyAssignment ExceptionResourceType = "IAMPOLICY_ASSIGNMENT" + ExceptionResourceTypeDataSource ExceptionResourceType = "DATA_SOURCE" + ExceptionResourceTypeDataSet ExceptionResourceType = "DATA_SET" + ExceptionResourceTypeVpcConnection ExceptionResourceType = "VPC_CONNECTION" + ExceptionResourceTypeIngestion ExceptionResourceType = "INGESTION" ) func (enum ExceptionResourceType) MarshalValue() (string, error) { @@ -24,6 +194,65 @@ func (enum ExceptionResourceType) MarshalValueBuf(b []byte) 
([]byte, error) { return append(b, enum...), nil } +type FileFormat string + +// Enum values for FileFormat +const ( + FileFormatCsv FileFormat = "CSV" + FileFormatTsv FileFormat = "TSV" + FileFormatClf FileFormat = "CLF" + FileFormatElf FileFormat = "ELF" + FileFormatXlsx FileFormat = "XLSX" + FileFormatJson FileFormat = "JSON" +) + +func (enum FileFormat) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum FileFormat) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type GeoSpatialCountryCode string + +// Enum values for GeoSpatialCountryCode +const ( + GeoSpatialCountryCodeUs GeoSpatialCountryCode = "US" +) + +func (enum GeoSpatialCountryCode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum GeoSpatialCountryCode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type GeoSpatialDataRole string + +// Enum values for GeoSpatialDataRole +const ( + GeoSpatialDataRoleCountry GeoSpatialDataRole = "COUNTRY" + GeoSpatialDataRoleState GeoSpatialDataRole = "STATE" + GeoSpatialDataRoleCounty GeoSpatialDataRole = "COUNTY" + GeoSpatialDataRoleCity GeoSpatialDataRole = "CITY" + GeoSpatialDataRolePostcode GeoSpatialDataRole = "POSTCODE" + GeoSpatialDataRoleLongitude GeoSpatialDataRole = "LONGITUDE" + GeoSpatialDataRoleLatitude GeoSpatialDataRole = "LATITUDE" +) + +func (enum GeoSpatialDataRole) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum GeoSpatialDataRole) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type IdentityType string // Enum values for IdentityType @@ -41,6 +270,231 @@ func (enum IdentityType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type IngestionErrorType string + +// Enum values for IngestionErrorType +const ( + IngestionErrorTypeFailureToAssumeRole IngestionErrorType = "FAILURE_TO_ASSUME_ROLE" + IngestionErrorTypeIngestionSuperseded IngestionErrorType = "INGESTION_SUPERSEDED" + IngestionErrorTypeIngestionCanceled IngestionErrorType = "INGESTION_CANCELED" + IngestionErrorTypeDataSetDeleted IngestionErrorType = "DATA_SET_DELETED" + IngestionErrorTypeDataSetNotSpice IngestionErrorType = "DATA_SET_NOT_SPICE" + IngestionErrorTypeS3UploadedFileDeleted IngestionErrorType = "S3_UPLOADED_FILE_DELETED" + IngestionErrorTypeS3ManifestError IngestionErrorType = "S3_MANIFEST_ERROR" + IngestionErrorTypeDataToleranceException IngestionErrorType = "DATA_TOLERANCE_EXCEPTION" + IngestionErrorTypeSpiceTableNotFound IngestionErrorType = "SPICE_TABLE_NOT_FOUND" + IngestionErrorTypeDataSetSizeLimitExceeded IngestionErrorType = "DATA_SET_SIZE_LIMIT_EXCEEDED" + IngestionErrorTypeRowSizeLimitExceeded IngestionErrorType = "ROW_SIZE_LIMIT_EXCEEDED" + IngestionErrorTypeAccountCapacityLimitExceeded IngestionErrorType = "ACCOUNT_CAPACITY_LIMIT_EXCEEDED" + IngestionErrorTypeCustomerError IngestionErrorType = "CUSTOMER_ERROR" + IngestionErrorTypeDataSourceNotFound IngestionErrorType = "DATA_SOURCE_NOT_FOUND" + IngestionErrorTypeIamRoleNotAvailable IngestionErrorType = "IAM_ROLE_NOT_AVAILABLE" + IngestionErrorTypeConnectionFailure IngestionErrorType = "CONNECTION_FAILURE" + IngestionErrorTypeSqlTableNotFound IngestionErrorType = "SQL_TABLE_NOT_FOUND" + IngestionErrorTypePermissionDenied IngestionErrorType = "PERMISSION_DENIED" + IngestionErrorTypeSslCertificateValidationFailure IngestionErrorType = "SSL_CERTIFICATE_VALIDATION_FAILURE" + 
IngestionErrorTypeOauthTokenFailure IngestionErrorType = "OAUTH_TOKEN_FAILURE" + IngestionErrorTypeSourceApiLimitExceededFailure IngestionErrorType = "SOURCE_API_LIMIT_EXCEEDED_FAILURE" + IngestionErrorTypePasswordAuthenticationFailure IngestionErrorType = "PASSWORD_AUTHENTICATION_FAILURE" + IngestionErrorTypeSqlSchemaMismatchError IngestionErrorType = "SQL_SCHEMA_MISMATCH_ERROR" + IngestionErrorTypeInvalidDateFormat IngestionErrorType = "INVALID_DATE_FORMAT" + IngestionErrorTypeInvalidDataprepSyntax IngestionErrorType = "INVALID_DATAPREP_SYNTAX" + IngestionErrorTypeSourceResourceLimitExceeded IngestionErrorType = "SOURCE_RESOURCE_LIMIT_EXCEEDED" + IngestionErrorTypeSqlInvalidParameterValue IngestionErrorType = "SQL_INVALID_PARAMETER_VALUE" + IngestionErrorTypeQueryTimeout IngestionErrorType = "QUERY_TIMEOUT" + IngestionErrorTypeSqlNumericOverflow IngestionErrorType = "SQL_NUMERIC_OVERFLOW" + IngestionErrorTypeUnresolvableHost IngestionErrorType = "UNRESOLVABLE_HOST" + IngestionErrorTypeUnroutableHost IngestionErrorType = "UNROUTABLE_HOST" + IngestionErrorTypeSqlException IngestionErrorType = "SQL_EXCEPTION" + IngestionErrorTypeS3FileInaccessible IngestionErrorType = "S3_FILE_INACCESSIBLE" + IngestionErrorTypeIotFileNotFound IngestionErrorType = "IOT_FILE_NOT_FOUND" + IngestionErrorTypeIotDataSetFileEmpty IngestionErrorType = "IOT_DATA_SET_FILE_EMPTY" + IngestionErrorTypeInvalidDataSourceConfig IngestionErrorType = "INVALID_DATA_SOURCE_CONFIG" + IngestionErrorTypeDataSourceAuthFailed IngestionErrorType = "DATA_SOURCE_AUTH_FAILED" + IngestionErrorTypeDataSourceConnectionFailed IngestionErrorType = "DATA_SOURCE_CONNECTION_FAILED" + IngestionErrorTypeFailureToProcessJsonFile IngestionErrorType = "FAILURE_TO_PROCESS_JSON_FILE" + IngestionErrorTypeInternalServiceError IngestionErrorType = "INTERNAL_SERVICE_ERROR" +) + +func (enum IngestionErrorType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum IngestionErrorType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type IngestionRequestSource string + +// Enum values for IngestionRequestSource +const ( + IngestionRequestSourceManual IngestionRequestSource = "MANUAL" + IngestionRequestSourceScheduled IngestionRequestSource = "SCHEDULED" +) + +func (enum IngestionRequestSource) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum IngestionRequestSource) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type IngestionRequestType string + +// Enum values for IngestionRequestType +const ( + IngestionRequestTypeInitialIngestion IngestionRequestType = "INITIAL_INGESTION" + IngestionRequestTypeEdit IngestionRequestType = "EDIT" + IngestionRequestTypeIncrementalRefresh IngestionRequestType = "INCREMENTAL_REFRESH" + IngestionRequestTypeFullRefresh IngestionRequestType = "FULL_REFRESH" +) + +func (enum IngestionRequestType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum IngestionRequestType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type IngestionStatus string + +// Enum values for IngestionStatus +const ( + IngestionStatusInitialized IngestionStatus = "INITIALIZED" + IngestionStatusQueued IngestionStatus = "QUEUED" + IngestionStatusRunning IngestionStatus = "RUNNING" + IngestionStatusFailed IngestionStatus = "FAILED" + IngestionStatusCompleted IngestionStatus = "COMPLETED" + IngestionStatusCancelled IngestionStatus = "CANCELLED" 
+) + +func (enum IngestionStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum IngestionStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type InputColumnDataType string + +// Enum values for InputColumnDataType +const ( + InputColumnDataTypeString InputColumnDataType = "STRING" + InputColumnDataTypeInteger InputColumnDataType = "INTEGER" + InputColumnDataTypeDecimal InputColumnDataType = "DECIMAL" + InputColumnDataTypeDatetime InputColumnDataType = "DATETIME" + InputColumnDataTypeBit InputColumnDataType = "BIT" + InputColumnDataTypeBoolean InputColumnDataType = "BOOLEAN" + InputColumnDataTypeJson InputColumnDataType = "JSON" +) + +func (enum InputColumnDataType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum InputColumnDataType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type JoinType string + +// Enum values for JoinType +const ( + JoinTypeInner JoinType = "INNER" + JoinTypeOuter JoinType = "OUTER" + JoinTypeLeft JoinType = "LEFT" + JoinTypeRight JoinType = "RIGHT" +) + +func (enum JoinType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum JoinType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ResourceStatus string + +// Enum values for ResourceStatus +const ( + ResourceStatusCreationInProgress ResourceStatus = "CREATION_IN_PROGRESS" + ResourceStatusCreationSuccessful ResourceStatus = "CREATION_SUCCESSFUL" + ResourceStatusCreationFailed ResourceStatus = "CREATION_FAILED" + ResourceStatusUpdateInProgress ResourceStatus = "UPDATE_IN_PROGRESS" + ResourceStatusUpdateSuccessful ResourceStatus = "UPDATE_SUCCESSFUL" + ResourceStatusUpdateFailed ResourceStatus = "UPDATE_FAILED" +) + +func (enum ResourceStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ResourceStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type RowLevelPermissionPolicy string + +// Enum values for RowLevelPermissionPolicy +const ( + RowLevelPermissionPolicyGrantAccess RowLevelPermissionPolicy = "GRANT_ACCESS" + RowLevelPermissionPolicyDenyAccess RowLevelPermissionPolicy = "DENY_ACCESS" +) + +func (enum RowLevelPermissionPolicy) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum RowLevelPermissionPolicy) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type TemplateErrorType string + +// Enum values for TemplateErrorType +const ( + TemplateErrorTypeDataSetNotFound TemplateErrorType = "DATA_SET_NOT_FOUND" + TemplateErrorTypeInternalFailure TemplateErrorType = "INTERNAL_FAILURE" +) + +func (enum TemplateErrorType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TemplateErrorType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type TextQualifier string + +// Enum values for TextQualifier +const ( + TextQualifierDoubleQuote TextQualifier = "DOUBLE_QUOTE" + TextQualifierSingleQuote TextQualifier = "SINGLE_QUOTE" +) + +func (enum TextQualifier) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TextQualifier) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type UserRole string // Enum values for UserRole diff --git a/service/quicksight/api_errors.go 
b/service/quicksight/api_errors.go index a2b88bf6ae4..708417958ce 100644 --- a/service/quicksight/api_errors.go +++ b/service/quicksight/api_errors.go @@ -13,6 +13,19 @@ const ( // the correct permissions, and that you are using the correct access keys. ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeConcurrentUpdatingException for service response error code + // "ConcurrentUpdatingException". + // + // A resource is already in an "actionable" state that must complete before + // a new update can be applied. + ErrCodeConcurrentUpdatingException = "ConcurrentUpdatingException" + + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // Updating or deleting a resource can cause an inconsistent state. + ErrCodeConflictException = "ConflictException" + // ErrCodeDomainNotWhitelistedException for service response error code // "DomainNotWhitelistedException". // @@ -68,7 +81,7 @@ const ( // ErrCodeResourceExistsException for service response error code // "ResourceExistsException". // - // The resource specified doesn't exist. + // The resource specified already exists. ErrCodeResourceExistsException = "ResourceExistsException" // ErrCodeResourceNotFoundException for service response error code diff --git a/service/quicksight/api_op_CancelIngestion.go b/service/quicksight/api_op_CancelIngestion.go new file mode 100644 index 00000000000..499055844d1 --- /dev/null +++ b/service/quicksight/api_op_CancelIngestion.go @@ -0,0 +1,201 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CancelIngestionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // An ID for the ingestion. + // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelIngestionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelIngestionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CancelIngestionInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.IngestionId == nil { + invalidParams.Add(aws.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("IngestionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CancelIngestionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CancelIngestionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the data ingestion. + Arn *string `type:"string"` + + // An ID for the ingestion. + IngestionId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CancelIngestionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CancelIngestionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCancelIngestion = "CancelIngestion" + +// CancelIngestionRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Cancels an on-going ingestion of data into SPICE. +// +// // Example sending a request using CancelIngestionRequest. +// req := client.CancelIngestionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CancelIngestion +func (c *Client) CancelIngestionRequest(input *CancelIngestionInput) CancelIngestionRequest { + op := &aws.Operation{ + Name: opCancelIngestion, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", + } + + if input == nil { + input = &CancelIngestionInput{} + } + + req := c.newRequest(op, input, &CancelIngestionOutput{}) + return CancelIngestionRequest{Request: req, Input: input, Copy: c.CancelIngestionRequest} +} + +// CancelIngestionRequest is the request type for the +// CancelIngestion API operation. +type CancelIngestionRequest struct { + *aws.Request + Input *CancelIngestionInput + Copy func(*CancelIngestionInput) CancelIngestionRequest +} + +// Send marshals and sends the CancelIngestion API request. 
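// A hedged usage sketch for the new CancelIngestion operation: the input carries
// the AwsAccountId, DataSetId, and IngestionId members shown above, and the
// request is sent with the same Send(ctx) pattern used throughout the generated
// clients. The identifiers and the choice of error code handled below are
// illustrative assumptions; the diff does not list which error codes this
// operation returns.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := quicksight.New(cfg)

	req := client.CancelIngestionRequest(&quicksight.CancelIngestionInput{
		AwsAccountId: aws.String("111122223333"),
		DataSetId:    aws.String("sales-data-set"),
		IngestionId:  aws.String("ingestion-to-cancel"),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		// The api_errors.go additions above define service error codes that can
		// be matched here, for example ResourceNotFoundException.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == quicksight.ErrCodeResourceNotFoundException {
			log.Fatalf("ingestion not found: %v", aerr)
		}
		log.Fatal(err)
	}
	fmt.Println(resp)
}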
+func (r CancelIngestionRequest) Send(ctx context.Context) (*CancelIngestionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CancelIngestionResponse{ + CancelIngestionOutput: r.Request.Data.(*CancelIngestionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CancelIngestionResponse is the response type for the +// CancelIngestion API operation. +type CancelIngestionResponse struct { + *CancelIngestionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CancelIngestion request. +func (r *CancelIngestionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateDashboard.go b/service/quicksight/api_op_CreateDashboard.go new file mode 100644 index 00000000000..ae9a8e6375e --- /dev/null +++ b/service/quicksight/api_op_CreateDashboard.go @@ -0,0 +1,370 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateDashboardInput struct { + _ struct{} `type:"structure"` + + // AWS account ID where you want to create the dashboard. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard, also added to IAM policy. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // Publishing options when creating a dashboard. + // + // * AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED + // or DISABLED. When this is set to DISABLED, QuickSight disables + // the left filter pane on the published dashboard, which can be used for + // AdHoc filtering. Enabled by default. + // + // * AvailabilityStatus for ExportToCSVOption - This can be either ENABLED + // or DISABLED. The visual option to export data to CSV is disabled when + // this is set to DISABLED. Enabled by default. + // + // * VisibilityState for SheetControlsOption - This can be either COLLAPSED + // or EXPANDED. The sheet controls pane is collapsed by default when set + // to true. Collapsed by default. + // + // Shorthand Syntax: + // + // AdHocFilteringDisabled=boolean,ExportToCSVDisabled=boolean,SheetControlsCollapsed=boolean + DashboardPublishOptions *DashboardPublishOptions `type:"structure"` + + // The display name of the dashboard. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A structure that contains the parameters of the dashboard. These are parameter + // overrides for a dashboard. A dashboard can have any type of parameters and + // some parameters might accept multiple values. You could use the following + // structure to override two string parameters that accept multiple values: + Parameters *Parameters `type:"structure"` + + // A structure that contains the permissions of the dashboard. You can use this + // for granting permissions with principal and action information. + Permissions []ResourcePermission `min:"1" type:"list"` + + // Source entity from which the dashboard is created.
The source entity accepts + // the ARN of the source template or analysis and also references the replacement + // datasets for the placeholders set when creating the template. The replacement + // datasets need to follow the same schema as the datasets for which placeholders + // were created when creating the template. + // + // If you are creating a dashboard from a source entity in a different AWS account, + // use the ARN of the source template. + // + // SourceEntity is a required field + SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the dashboard. + Tags []Tag `min:"1" type:"list"` + + // A description for the first version of the dashboard being created. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDashboardInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDashboardInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDashboardInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Permissions", 1)) + } + + if s.SourceEntity == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceEntity")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(aws.ErrInvalidParams)) + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(aws.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s CreateDashboardInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DashboardPublishOptions != nil { + v := s.DashboardPublishOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DashboardPublishOptions", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Parameters != nil { + v := s.Parameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Parameters", v, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.SourceEntity != nil { + v := s.SourceEntity + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceEntity", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.VersionDescription != nil { + v := *s.VersionDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateDashboardOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dashboard. + Arn *string `type:"string"` + + // The creation status of the dashboard create request. + CreationStatus ResourceStatus `type:"string" enum:"true"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ARN of the dashboard, including the version number of the first version + // that is created. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateDashboardOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDashboardOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.CreationStatus) > 0 { + v := s.CreationStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VersionArn != nil { + v := *s.VersionArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCreateDashboard = "CreateDashboard" + +// CreateDashboardRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Creates a dashboard from a template. To first create a template, see the +// CreateTemplate API. +// +// A dashboard is an entity in QuickSight which identifies Quicksight reports, +// created from analyses. QuickSight dashboards are sharable. With the right +// permissions, you can create scheduled email reports from them. The CreateDashboard, +// DescribeDashboard and ListDashboardsByUser APIs act on the dashboard entity. +// If you have the correct permissions, you can create a dashboard from a template +// that exists in a different AWS account. +// +// CLI syntax: +// +// aws quicksight create-dashboard --cli-input-json file://create-dashboard.json +// +// // Example sending a request using CreateDashboardRequest. +// req := client.CreateDashboardRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateDashboard +func (c *Client) CreateDashboardRequest(input *CreateDashboardInput) CreateDashboardRequest { + op := &aws.Operation{ + Name: opCreateDashboard, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + } + + if input == nil { + input = &CreateDashboardInput{} + } + + req := c.newRequest(op, input, &CreateDashboardOutput{}) + return CreateDashboardRequest{Request: req, Input: input, Copy: c.CreateDashboardRequest} +} + +// CreateDashboardRequest is the request type for the +// CreateDashboard API operation. +type CreateDashboardRequest struct { + *aws.Request + Input *CreateDashboardInput + Copy func(*CreateDashboardInput) CreateDashboardRequest +} + +// Send marshals and sends the CreateDashboard API request. +func (r CreateDashboardRequest) Send(ctx context.Context) (*CreateDashboardResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDashboardResponse{ + CreateDashboardOutput: r.Request.Data.(*CreateDashboardOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDashboardResponse is the response type for the +// CreateDashboard API operation. 
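// A dashboard created through CreateDashboardRequest reports its provisioning
// state via the CreationStatus member of the output, typed with the
// ResourceStatus enum added in api_enums.go above. A small, hedged sketch of
// inspecting the response; the printed messages are illustrative only.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// reportDashboardStatus summarizes the creation state returned by CreateDashboard.
func reportDashboardStatus(resp *quicksight.CreateDashboardResponse) {
	switch resp.CreationStatus {
	case quicksight.ResourceStatusCreationInProgress:
		fmt.Println("dashboard creation is still in progress")
	case quicksight.ResourceStatusCreationSuccessful:
		if resp.VersionArn != nil {
			fmt.Println("created dashboard version:", *resp.VersionArn)
		}
	case quicksight.ResourceStatusCreationFailed:
		if resp.RequestId != nil {
			fmt.Println("dashboard creation failed; request ID:", *resp.RequestId)
		}
	}
}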
+type CreateDashboardResponse struct { + *CreateDashboardOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDashboard request. +func (r *CreateDashboardResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateDataSet.go b/service/quicksight/api_op_CreateDataSet.go new file mode 100644 index 00000000000..fb8779cfd76 --- /dev/null +++ b/service/quicksight/api_op_CreateDataSet.go @@ -0,0 +1,424 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateDataSetInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // Groupings of columns that work together in certain QuickSight features. Currently + // only geospatial hierarchy is supported. + ColumnGroups []ColumnGroup `min:"1" type:"list"` + + // An ID for the dataset you want to create. This is unique per region per AWS + // account. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // Indicates whether or not you want to import the data into SPICE. + // + // ImportMode is a required field + ImportMode DataSetImportMode `type:"string" required:"true" enum:"true"` + + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]LogicalTable `min:"1" type:"map"` + + // The display name for the dataset. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of resource permissions on the dataset. + Permissions []ResourcePermission `min:"1" type:"list"` + + // Declares the physical tables that are available in the underlying data sources. + // + // PhysicalTableMap is a required field + PhysicalTableMap map[string]PhysicalTable `min:"1" type:"map" required:"true"` + + // Row-level security configuration on the data you want to create. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the dataset. + Tags []Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDataSetInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnGroups", 1)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if len(s.ImportMode) == 0 { + invalidParams.Add(aws.NewErrParamRequired("ImportMode")) + } + if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogicalTableMap", 1)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Permissions", 1)) + } + + if s.PhysicalTableMap == nil { + invalidParams.Add(aws.NewErrParamRequired("PhysicalTableMap")) + } + if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PhysicalTableMap", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + if s.ColumnGroups != nil { + for i, v := range s.ColumnGroups { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.LogicalTableMap != nil { + for i, v := range s.LogicalTableMap { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.PhysicalTableMap != nil { + for i, v := range s.PhysicalTableMap { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RowLevelPermissionDataSet != nil { + if err := s.RowLevelPermissionDataSet.Validate(); err != nil { + invalidParams.AddNested("RowLevelPermissionDataSet", err.(aws.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ColumnGroups != nil { + v := s.ColumnGroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnGroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.ImportMode) > 0 { + v := s.ImportMode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ImportMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.LogicalTableMap != nil { + v := s.LogicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "LogicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.PhysicalTableMap != nil { + v := s.PhysicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "PhysicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.RowLevelPermissionDataSet != nil { + v := s.RowLevelPermissionDataSet + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RowLevelPermissionDataSet", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateDataSetOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + Arn *string `type:"string"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + DataSetId *string `type:"string"` + + // The Amazon Resource Name (ARN) for the ingestion, which is triggered as a + // result of dataset creation if the import mode is SPICE + IngestionArn *string `type:"string"` + + // The ID of the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE + IngestionId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionArn != nil { + v := *s.IngestionArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCreateDataSet = "CreateDataSet" + +// CreateDataSetRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Creates a dataset. +// +// CLI syntax: +// +// aws quicksight create-data-set \ +// +// --aws-account-id=111122223333 \ +// +// --data-set-id=unique-data-set-id \ +// +// --name='My dataset' \ +// +// --import-mode=SPICE \ +// +// --physical-table-map='{ +// +// "physical-table-id": { +// +// "RelationalTable": { +// +// "DataSourceArn": "arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id", +// +// "Name": "table1", +// +// "InputColumns": [ +// +// { +// +// "Name": "column1", +// +// "Type": "STRING" +// +// } +// +// ] +// +// } +// +// }' +// +// // Example sending a request using CreateDataSetRequest. +// req := client.CreateDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateDataSet +func (c *Client) CreateDataSetRequest(input *CreateDataSetInput) CreateDataSetRequest { + op := &aws.Operation{ + Name: opCreateDataSet, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sets", + } + + if input == nil { + input = &CreateDataSetInput{} + } + + req := c.newRequest(op, input, &CreateDataSetOutput{}) + return CreateDataSetRequest{Request: req, Input: input, Copy: c.CreateDataSetRequest} +} + +// CreateDataSetRequest is the request type for the +// CreateDataSet API operation. +type CreateDataSetRequest struct { + *aws.Request + Input *CreateDataSetInput + Copy func(*CreateDataSetInput) CreateDataSetRequest +} + +// Send marshals and sends the CreateDataSet API request. +func (r CreateDataSetRequest) Send(ctx context.Context) (*CreateDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDataSetResponse{ + CreateDataSetOutput: r.Request.Data.(*CreateDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDataSetResponse is the response type for the +// CreateDataSet API operation. +type CreateDataSetResponse struct { + *CreateDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDataSet request. 
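+//
+// A minimal sketch of inspecting the result (params is an assumed
+// *CreateDataSetInput); IngestionId and IngestionArn are only populated when
+// ImportMode is SPICE, as noted on CreateDataSetOutput above:
+//
+//    resp, err := client.CreateDataSetRequest(params).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    if resp.IngestionId != nil {
+//        fmt.Println("SPICE ingestion started:", *resp.IngestionId)
+//    }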
+func (r *CreateDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateDataSource.go b/service/quicksight/api_op_CreateDataSource.go new file mode 100644 index 00000000000..ce2a1c1bb46 --- /dev/null +++ b/service/quicksight/api_op_CreateDataSource.go @@ -0,0 +1,360 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateDataSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The credentials QuickSight uses to connect to your underlying source. Currently + // only username/password based credentials are supported. + Credentials *DataSourceCredentials `type:"structure" sensitive:"true"` + + // An ID for the data source. This is unique per AWS Region per AWS account. + // + // DataSourceId is a required field + DataSourceId *string `type:"string" required:"true"` + + // The parameters QuickSight uses to connect to your underlying source. + DataSourceParameters *DataSourceParameters `type:"structure"` + + // A display name for the data source. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of resource permissions on the data source. + Permissions []ResourcePermission `min:"1" type:"list"` + + // SSL properties that apply when QuickSight connects to your underlying source. + SslProperties *SslProperties `type:"structure"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the data source. + Tags []Tag `min:"1" type:"list"` + + // The type of the data source. Currently the supported types for this operation + // are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO, + // REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources + // to return a list of all data sources. + // + // Type is a required field + Type DataSourceType `type:"string" required:"true" enum:"true"` + + // You need to use this parameter only when you want QuickSight to use a VPC + // connection when connecting to your underlying source. + VpcConnectionProperties *VpcConnectionProperties `type:"structure"` +} + +// String returns the string representation +func (s CreateDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
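+//
+// For illustration only, a minimal input mirroring the CLI example in
+// CreateDataSourceRequest below (all IDs are placeholders, and a real call
+// would normally also set DataSourceParameters and Credentials for the chosen
+// Type):
+//
+//    input := &CreateDataSourceInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        DataSourceId: aws.String("unique-data-source-id"),
+//        Name:         aws.String("My Data Source"),
+//        Type:         DataSourceType("POSTGRESQL"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        return err
+//    }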
+func (s *CreateDataSourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDataSourceInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Permissions", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + if s.Credentials != nil { + if err := s.Credentials.Validate(); err != nil { + invalidParams.AddNested("Credentials", err.(aws.ErrInvalidParams)) + } + } + if s.DataSourceParameters != nil { + if err := s.DataSourceParameters.Validate(); err != nil { + invalidParams.AddNested("DataSourceParameters", err.(aws.ErrInvalidParams)) + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.VpcConnectionProperties != nil { + if err := s.VpcConnectionProperties.Validate(); err != nil { + invalidParams.AddNested("VpcConnectionProperties", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Credentials != nil { + v := s.Credentials + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Credentials", v, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceParameters != nil { + v := s.DataSourceParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSourceParameters", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.SslProperties != nil { + v := s.SslProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SslProperties", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VpcConnectionProperties != nil { + v := s.VpcConnectionProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VpcConnectionProperties", v, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data source. + Arn *string `type:"string"` + + // The status of creating the data source. + CreationStatus ResourceStatus `type:"string" enum:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDataSourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.CreationStatus) > 0 { + v := s.CreationStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCreateDataSource = "CreateDataSource" + +// CreateDataSourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Creates a data source. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id +// +// CLI syntax: +// +// aws quicksight create-data-source \ +// +// --aws-account-id=111122223333 \ +// +// --data-source-id=unique-data-source-id \ +// +// --name='My Data Source' \ +// +// --type=POSTGRESQL \ +// +// --data-source-parameters='{ "PostgreSqlParameters": { +// +// "Host": "my-db-host.example.com", +// +// "Port": 1234, +// +// "Database": "my-db" } }' \ +// +// --credentials='{ "CredentialPair": { +// +// "Username": "username", +// +// "Password": "password" } }' +// +// // Example sending a request using CreateDataSourceRequest. +// req := client.CreateDataSourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateDataSource +func (c *Client) CreateDataSourceRequest(input *CreateDataSourceInput) CreateDataSourceRequest { + op := &aws.Operation{ + Name: opCreateDataSource, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sources", + } + + if input == nil { + input = &CreateDataSourceInput{} + } + + req := c.newRequest(op, input, &CreateDataSourceOutput{}) + return CreateDataSourceRequest{Request: req, Input: input, Copy: c.CreateDataSourceRequest} +} + +// CreateDataSourceRequest is the request type for the +// CreateDataSource API operation. +type CreateDataSourceRequest struct { + *aws.Request + Input *CreateDataSourceInput + Copy func(*CreateDataSourceInput) CreateDataSourceRequest +} + +// Send marshals and sends the CreateDataSource API request. +func (r CreateDataSourceRequest) Send(ctx context.Context) (*CreateDataSourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDataSourceResponse{ + CreateDataSourceOutput: r.Request.Data.(*CreateDataSourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDataSourceResponse is the response type for the +// CreateDataSource API operation. +type CreateDataSourceResponse struct { + *CreateDataSourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDataSource request. 
+func (r *CreateDataSourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateIAMPolicyAssignment.go b/service/quicksight/api_op_CreateIAMPolicyAssignment.go new file mode 100644 index 00000000000..cb1194cec0b --- /dev/null +++ b/service/quicksight/api_op_CreateIAMPolicyAssignment.go @@ -0,0 +1,310 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. It must be unique within an AWS account. + // + // AssignmentName is a required field + AssignmentName *string `min:"1" type:"string" required:"true"` + + // The status of an assignment: + // + // * ENABLED - Anything specified in this assignment is used while creating + // the data source. + // + // * DISABLED - This assignment isn't used while creating the data source. + // + // * DRAFT - Assignment is an unfinished draft and isn't used while creating + // the data source. + // + // AssignmentStatus is a required field + AssignmentStatus AssignmentStatus `type:"string" required:"true" enum:"true"` + + // The AWS Account ID where you want to assign QuickSight users or groups to + // an IAM policy. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // QuickSight users and/or groups that you want to assign the policy to. + Identities map[string][]string `type:"map"` + + // The namespace that contains the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // An IAM policy ARN that you want to apply to the QuickSight users and groups + // specified in this assignment. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateIAMPolicyAssignmentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateIAMPolicyAssignmentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateIAMPolicyAssignmentInput"} + + if s.AssignmentName == nil { + invalidParams.Add(aws.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AssignmentName", 1)) + } + if len(s.AssignmentStatus) == 0 { + invalidParams.Add(aws.NewErrParamRequired("AssignmentStatus")) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateIAMPolicyAssignmentInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Identities != nil { + v := s.Identities + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Identities", metadata) + ms0.Start() + for k1, v1 := range v { + ls1 := ms0.List(k1) + ls1.Start() + for _, v2 := range v1 { + ls1.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v2)}) + } + ls1.End() + } + ms0.End() + + } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` + + // An ID for the assignment. + AssignmentId *string `type:"string"` + + // The name of the assignment. Must be unique within an AWS account. + AssignmentName *string `min:"1" type:"string"` + + // The status of an assignment: + // + // * ENABLED - Anything specified in this assignment is used while creating + // the data source. + // + // * DISABLED - This assignment isn't used while creating the data source. + // + // * DRAFT - Assignment is an unfinished draft and isn't used while creating + // the data source. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` + + // QuickSight users and/or groups that are assigned to the IAM policy. + Identities map[string][]string `type:"map"` + + // An IAM policy ARN that is applied to the QuickSight users and groups specified + // in this assignment. + PolicyArn *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateIAMPolicyAssignmentOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateIAMPolicyAssignmentOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.AssignmentId != nil {
+		v := *s.AssignmentId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "AssignmentId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.AssignmentName != nil {
+		v := *s.AssignmentName
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if len(s.AssignmentStatus) > 0 {
+		v := s.AssignmentStatus
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+	}
+	if s.Identities != nil {
+		v := s.Identities
+
+		metadata := protocol.Metadata{}
+		ms0 := e.Map(protocol.BodyTarget, "Identities", metadata)
+		ms0.Start()
+		for k1, v1 := range v {
+			ls1 := ms0.List(k1)
+			ls1.Start()
+			for _, v2 := range v1 {
+				ls1.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v2)})
+			}
+			ls1.End()
+		}
+		ms0.End()
+
+	}
+	if s.PolicyArn != nil {
+		v := *s.PolicyArn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opCreateIAMPolicyAssignment = "CreateIAMPolicyAssignment"
+
+// CreateIAMPolicyAssignmentRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Creates an assignment with one specified IAM policy ARN, which is then assigned
+// to the specified groups or users of QuickSight. Users and groups need to be
+// in the same namespace.
+//
+// CLI syntax:
+//
+// aws quicksight create-iam-policy-assignment --aws-account-id=111122223333
+// --assignment-name=helpAssignment --policy-arn=arn:aws:iam::aws:policy/AdministratorAccess
+// --identities="user=user5,engineer123,group=QS-Admin" --namespace=default
+// --region=us-west-2
+//
+//    // Example sending a request using CreateIAMPolicyAssignmentRequest.
+//    req := client.CreateIAMPolicyAssignmentRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateIAMPolicyAssignment
+func (c *Client) CreateIAMPolicyAssignmentRequest(input *CreateIAMPolicyAssignmentInput) CreateIAMPolicyAssignmentRequest {
+	op := &aws.Operation{
+		Name:       opCreateIAMPolicyAssignment,
+		HTTPMethod: "POST",
+		HTTPPath:   "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/",
+	}
+
+	if input == nil {
+		input = &CreateIAMPolicyAssignmentInput{}
+	}
+
+	req := c.newRequest(op, input, &CreateIAMPolicyAssignmentOutput{})
+	return CreateIAMPolicyAssignmentRequest{Request: req, Input: input, Copy: c.CreateIAMPolicyAssignmentRequest}
+}
+
+// CreateIAMPolicyAssignmentRequest is the request type for the
+// CreateIAMPolicyAssignment API operation.
+type CreateIAMPolicyAssignmentRequest struct {
+	*aws.Request
+	Input *CreateIAMPolicyAssignmentInput
+	Copy  func(*CreateIAMPolicyAssignmentInput) CreateIAMPolicyAssignmentRequest
+}
+
+// Send marshals and sends the CreateIAMPolicyAssignment API request.
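+//
+// A sketch of the equivalent Go call for the CLI example above (all account IDs,
+// names, and ARNs are placeholders):
+//
+//    req := client.CreateIAMPolicyAssignmentRequest(&CreateIAMPolicyAssignmentInput{
+//        AwsAccountId:     aws.String("111122223333"),
+//        Namespace:        aws.String("default"),
+//        AssignmentName:   aws.String("helpAssignment"),
+//        AssignmentStatus: AssignmentStatus("ENABLED"),
+//        PolicyArn:        aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
+//        Identities: map[string][]string{
+//            "user":  {"user5", "engineer123"},
+//            "group": {"QS-Admin"},
+//        },
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println(resp)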
+func (r CreateIAMPolicyAssignmentRequest) Send(ctx context.Context) (*CreateIAMPolicyAssignmentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateIAMPolicyAssignmentResponse{ + CreateIAMPolicyAssignmentOutput: r.Request.Data.(*CreateIAMPolicyAssignmentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateIAMPolicyAssignmentResponse is the response type for the +// CreateIAMPolicyAssignment API operation. +type CreateIAMPolicyAssignmentResponse struct { + *CreateIAMPolicyAssignmentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateIAMPolicyAssignment request. +func (r *CreateIAMPolicyAssignmentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateIngestion.go b/service/quicksight/api_op_CreateIngestion.go new file mode 100644 index 00000000000..ee03412a567 --- /dev/null +++ b/service/quicksight/api_op_CreateIngestion.go @@ -0,0 +1,215 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateIngestionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // An ID for the ingestion. + // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateIngestionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateIngestionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateIngestionInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.IngestionId == nil { + invalidParams.Add(aws.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("IngestionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
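+//
+// All three members are URI path parameters. For illustration, an input built
+// with hypothetical IDs such as
+//
+//    input := &CreateIngestionInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        DataSetId:    aws.String("my-data-set-id"),
+//        IngestionId:  aws.String("my-ingestion-id"),
+//    }
+//
+// is encoded onto the request path as
+// /accounts/111122223333/data-sets/my-data-set-id/ingestions/my-ingestion-id,
+// matching the HTTPPath declared in CreateIngestionRequest below.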
+func (s CreateIngestionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateIngestionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the data ingestion. + Arn *string `type:"string"` + + // An ID for the ingestion. + IngestionId *string `min:"1" type:"string"` + + // The ingestion status. + IngestionStatus IngestionStatus `type:"string" enum:"true"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateIngestionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateIngestionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.IngestionStatus) > 0 { + v := s.IngestionStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCreateIngestion = "CreateIngestion" + +// CreateIngestionRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Creates and starts a new SPICE ingestion on a dataset +// +// Any ingestions operating on tagged datasets inherit the same tags automatically +// for use in access-control. For an example, see How do I create an IAM policy +// to control access to Amazon EC2 resources using tags? (https://aws.example.com/premiumsupport/knowledge-center/iam-ec2-resource-tags/). +// Tags will be visible on the tagged dataset, but not on the ingestion resource. +// +// // Example sending a request using CreateIngestionRequest. 
+// req := client.CreateIngestionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateIngestion +func (c *Client) CreateIngestionRequest(input *CreateIngestionInput) CreateIngestionRequest { + op := &aws.Operation{ + Name: opCreateIngestion, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", + } + + if input == nil { + input = &CreateIngestionInput{} + } + + req := c.newRequest(op, input, &CreateIngestionOutput{}) + return CreateIngestionRequest{Request: req, Input: input, Copy: c.CreateIngestionRequest} +} + +// CreateIngestionRequest is the request type for the +// CreateIngestion API operation. +type CreateIngestionRequest struct { + *aws.Request + Input *CreateIngestionInput + Copy func(*CreateIngestionInput) CreateIngestionRequest +} + +// Send marshals and sends the CreateIngestion API request. +func (r CreateIngestionRequest) Send(ctx context.Context) (*CreateIngestionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateIngestionResponse{ + CreateIngestionOutput: r.Request.Data.(*CreateIngestionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateIngestionResponse is the response type for the +// CreateIngestion API operation. +type CreateIngestionResponse struct { + *CreateIngestionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateIngestion request. +func (r *CreateIngestionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateTemplate.go b/service/quicksight/api_op_CreateTemplate.go new file mode 100644 index 00000000000..11624dbe7f8 --- /dev/null +++ b/service/quicksight/api_op_CreateTemplate.go @@ -0,0 +1,338 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateTemplateInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A display name for the template. + Name *string `min:"1" type:"string"` + + // A list of resource permissions to be set on the template. The shorthand syntax + // should look similar to this: Shorthand Syntax: Principal=string,Actions=string,string + // ... + Permissions []ResourcePermission `min:"1" type:"list"` + + // The ARN of the source entity from which this template is being created. Templates + // can be currently created from an analysis or another template. If the ARN + // is for an analysis, you must include its dataset references. + // + // SourceEntity is a required field + SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. + Tags []Tag `min:"1" type:"list"` + + // An ID for the template you want to create. This is unique per AWS region + // per AWS account. 
+ // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // A description of the current template version being created. This API created + // the first version of the template. Every time UpdateTemplate is called a + // new version is created. Each version of the template maintains a description + // of the version in the VersionDescription field. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateTemplateInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Permissions", 1)) + } + + if s.SourceEntity == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceEntity")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(aws.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.SourceEntity != nil { + v := s.SourceEntity + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceEntity", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.VersionDescription != nil { + v := *s.VersionDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateTemplateOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the template. + Arn *string `type:"string"` + + // The template creation status. + CreationStatus ResourceStatus `type:"string" enum:"true"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ID of the template. + TemplateId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) for the template, including the version information + // of the first version. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateTemplateOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.Arn != nil {
+		v := *s.Arn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if len(s.CreationStatus) > 0 {
+		v := s.CreationStatus
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "CreationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+	}
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateId != nil {
+		v := *s.TemplateId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.VersionArn != nil {
+		v := *s.VersionArn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "VersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opCreateTemplate = "CreateTemplate"
+
+// CreateTemplateRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Creates a template from an existing QuickSight analysis or template. The
+// resulting template can be used to create a dashboard.
+//
+// A template is an entity in QuickSight which encapsulates the metadata required
+// to create an analysis that can be used to create a dashboard. It adds a layer
+// of abstraction by using placeholders to replace the dataset associated with
+// the analysis. You can use templates to create dashboards by replacing dataset
+// placeholders with datasets which follow the same schema that was used to
+// create the source analysis and template.
+//
+// To create a template from an existing analysis, use the analysis's ARN, aws-account-id,
+// template-id, source-entity, and data-set-references.
+//
+// CLI syntax to create a template:
+//
+// aws quicksight create-template --cli-input-json file://create-template.json
+//
+// CLI syntax to create a template from another template in the same AWS account:
+//
+// aws quicksight create-template --aws-account-id 111122223333 --template-id
+// reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0dfc789c-81f6-4f4f-b9ac-7db2453eefc8
+// DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/f60da323-af68-45db-9016-08e0d1d7ded5
+// --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/7fb74527-c36d-4be8-8139-ac1be4c97365}'
+//
+// To create a template from another account’s template, you need to grant cross-account
+// resource permission for DescribeTemplate to the account that contains the
+// template.
+//
+// You can use a file to pass JSON to the function if you prefer.
+//
+//    // Example sending a request using CreateTemplateRequest.
+// req := client.CreateTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateTemplate +func (c *Client) CreateTemplateRequest(input *CreateTemplateInput) CreateTemplateRequest { + op := &aws.Operation{ + Name: opCreateTemplate, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + } + + if input == nil { + input = &CreateTemplateInput{} + } + + req := c.newRequest(op, input, &CreateTemplateOutput{}) + return CreateTemplateRequest{Request: req, Input: input, Copy: c.CreateTemplateRequest} +} + +// CreateTemplateRequest is the request type for the +// CreateTemplate API operation. +type CreateTemplateRequest struct { + *aws.Request + Input *CreateTemplateInput + Copy func(*CreateTemplateInput) CreateTemplateRequest +} + +// Send marshals and sends the CreateTemplate API request. +func (r CreateTemplateRequest) Send(ctx context.Context) (*CreateTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateTemplateResponse{ + CreateTemplateOutput: r.Request.Data.(*CreateTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateTemplateResponse is the response type for the +// CreateTemplate API operation. +type CreateTemplateResponse struct { + *CreateTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateTemplate request. +func (r *CreateTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_CreateTemplateAlias.go b/service/quicksight/api_op_CreateTemplateAlias.go new file mode 100644 index 00000000000..77149e44f17 --- /dev/null +++ b/service/quicksight/api_op_CreateTemplateAlias.go @@ -0,0 +1,220 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type CreateTemplateAliasInput struct { + _ struct{} `type:"structure"` + + // The name you want to give the template's alias. Alias names can't begin with + // a $, which is reserved by QuickSight. Alias names that start with ‘$’ + // sign are QuickSight reserved naming and can't be deleted. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // AWS account ID that contains the template you are aliasing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // The version number of the template. + // + // TemplateVersionNumber is a required field + TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateTemplateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
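+//
+// For illustration, the CLI example in CreateTemplateAliasRequest below maps
+// roughly onto this input (all values are placeholders):
+//
+//    input := &CreateTemplateAliasInput{
+//        AwsAccountId:          aws.String("111122223333"),
+//        TemplateId:            aws.String("reports_test_template"),
+//        AliasName:             aws.String("PROD"),
+//        TemplateVersionNumber: aws.Int64(1),
+//    }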
+func (s *CreateTemplateAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateTemplateAliasInput"} + + if s.AliasName == nil { + invalidParams.Add(aws.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if s.TemplateVersionNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateVersionNumber")) + } + if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("TemplateVersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateTemplateAliasInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.TemplateVersionNumber != nil { + v := *s.TemplateVersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateVersionNumber", protocol.Int64Value(v), metadata) + } + if s.AliasName != nil { + v := *s.AliasName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type CreateTemplateAliasOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information on the template alias. + TemplateAlias *TemplateAlias `type:"structure"` +} + +// String returns the string representation +func (s CreateTemplateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateTemplateAliasOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateAlias != nil { + v := s.TemplateAlias + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TemplateAlias", v, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opCreateTemplateAlias = "CreateTemplateAlias" + +// CreateTemplateAliasRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Creates a template alias for a template. 
+//
+// CLI syntax:
+//
+// aws quicksight create-template-alias --aws-account-id 111122223333 --template-id
+// 'reports_test_template' --alias-name PROD --version-number 1
+//
+//    // Example sending a request using CreateTemplateAliasRequest.
+//    req := client.CreateTemplateAliasRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateTemplateAlias
+func (c *Client) CreateTemplateAliasRequest(input *CreateTemplateAliasInput) CreateTemplateAliasRequest {
+	op := &aws.Operation{
+		Name:       opCreateTemplateAlias,
+		HTTPMethod: "POST",
+		HTTPPath:   "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}",
+	}
+
+	if input == nil {
+		input = &CreateTemplateAliasInput{}
+	}
+
+	req := c.newRequest(op, input, &CreateTemplateAliasOutput{})
+	return CreateTemplateAliasRequest{Request: req, Input: input, Copy: c.CreateTemplateAliasRequest}
+}
+
+// CreateTemplateAliasRequest is the request type for the
+// CreateTemplateAlias API operation.
+type CreateTemplateAliasRequest struct {
+	*aws.Request
+	Input *CreateTemplateAliasInput
+	Copy  func(*CreateTemplateAliasInput) CreateTemplateAliasRequest
+}
+
+// Send marshals and sends the CreateTemplateAlias API request.
+func (r CreateTemplateAliasRequest) Send(ctx context.Context) (*CreateTemplateAliasResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &CreateTemplateAliasResponse{
+		CreateTemplateAliasOutput: r.Request.Data.(*CreateTemplateAliasOutput),
+		response:                  &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// CreateTemplateAliasResponse is the response type for the
+// CreateTemplateAlias API operation.
+type CreateTemplateAliasResponse struct {
+	*CreateTemplateAliasOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// CreateTemplateAlias request.
+func (r *CreateTemplateAliasResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/service/quicksight/api_op_DeleteDashboard.go b/service/quicksight/api_op_DeleteDashboard.go
new file mode 100644
index 00000000000..e9f63c8d9e2
--- /dev/null
+++ b/service/quicksight/api_op_DeleteDashboard.go
@@ -0,0 +1,207 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package quicksight
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+type DeleteDashboardInput struct {
+	_ struct{} `type:"structure"`
+
+	// AWS account ID that contains the dashboard you are deleting.
+	//
+	// AwsAccountId is a required field
+	AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"`
+
+	// The ID for the dashboard.
+	//
+	// DashboardId is a required field
+	DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"`
+
+	// The version number of the dashboard. If the version number property is provided,
+	// only the specified version of the dashboard is deleted.
+	VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s DeleteDashboardInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
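+//
+// A minimal sketch of deleting a single dashboard version, mirroring the CLI
+// examples in DeleteDashboardRequest below (IDs are placeholders):
+//
+//    input := &DeleteDashboardInput{
+//        AwsAccountId:  aws.String("111122223333"),
+//        DashboardId:   aws.String("123123123"),
+//        VersionNumber: aws.Int64(3),
+//    }
+//    resp, err := client.DeleteDashboardRequest(input).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println(resp)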
+func (s *DeleteDashboardInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "DeleteDashboardInput"}
+
+ if s.AwsAccountId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("AwsAccountId"))
+ }
+ if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 {
+ invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12))
+ }
+
+ if s.DashboardId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("DashboardId"))
+ }
+ if s.DashboardId != nil && len(*s.DashboardId) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1))
+ }
+ if s.VersionNumber != nil && *s.VersionNumber < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("VersionNumber", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s DeleteDashboardInput) MarshalFields(e protocol.FieldEncoder) error {
+ e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+ if s.AwsAccountId != nil {
+ v := *s.AwsAccountId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.DashboardId != nil {
+ v := *s.DashboardId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.VersionNumber != nil {
+ v := *s.VersionNumber
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.QueryTarget, "version-number", protocol.Int64Value(v), metadata)
+ }
+ return nil
+}
+
+type DeleteDashboardOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the resource.
+ Arn *string `type:"string"`
+
+ // The ID of the dashboard.
+ DashboardId *string `min:"1" type:"string"`
+
+ // The AWS request ID for this operation.
+ RequestId *string `type:"string"`
+
+ // The http status of the request.
+ Status *int64 `location:"statusCode" type:"integer"`
+}
+
+// String returns the string representation
+func (s DeleteDashboardOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s DeleteDashboardOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.Arn != nil {
+ v := *s.Arn
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.DashboardId != nil {
+ v := *s.DashboardId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.RequestId != nil {
+ v := *s.RequestId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ // ignoring invalid encode state, StatusCode. Status
+ return nil
+}
+
+const opDeleteDashboard = "DeleteDashboard"
+
+// DeleteDashboardRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Deletes a dashboard.
+//
+// CLI syntax:
+//
+// aws quicksight delete-dashboard --aws-account-id 111122223333 --dashboard-id
+// 123123123
+//
+// aws quicksight delete-dashboard --aws-account-id 111122223333 --dashboard-id
+// 123123123 --version-number 3
+//
+// // Example sending a request using DeleteDashboardRequest.
+// req := client.DeleteDashboardRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDashboard +func (c *Client) DeleteDashboardRequest(input *DeleteDashboardInput) DeleteDashboardRequest { + op := &aws.Operation{ + Name: opDeleteDashboard, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + } + + if input == nil { + input = &DeleteDashboardInput{} + } + + req := c.newRequest(op, input, &DeleteDashboardOutput{}) + return DeleteDashboardRequest{Request: req, Input: input, Copy: c.DeleteDashboardRequest} +} + +// DeleteDashboardRequest is the request type for the +// DeleteDashboard API operation. +type DeleteDashboardRequest struct { + *aws.Request + Input *DeleteDashboardInput + Copy func(*DeleteDashboardInput) DeleteDashboardRequest +} + +// Send marshals and sends the DeleteDashboard API request. +func (r DeleteDashboardRequest) Send(ctx context.Context) (*DeleteDashboardResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteDashboardResponse{ + DeleteDashboardOutput: r.Request.Data.(*DeleteDashboardOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteDashboardResponse is the response type for the +// DeleteDashboard API operation. +type DeleteDashboardResponse struct { + *DeleteDashboardOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteDashboard request. +func (r *DeleteDashboardResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteDataSet.go b/service/quicksight/api_op_DeleteDataSet.go new file mode 100644 index 00000000000..0c85115fa4e --- /dev/null +++ b/service/quicksight/api_op_DeleteDataSet.go @@ -0,0 +1,193 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteDataSetInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteDataSetInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
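// Editorial sketch (not part of the generated diff) for the DeleteDashboard operation
// documented above. It reuses the imports and client construction from the earlier
// CreateTemplateAlias sketch; the IDs are placeholders. Passing VersionNumber deletes
// only that dashboard version; omitting it deletes the dashboard entirely.
func deleteDashboardVersion(ctx context.Context, client *quicksight.Client, version int64) error {
	req := client.DeleteDashboardRequest(&quicksight.DeleteDashboardInput{
		AwsAccountId:  aws.String("111122223333"),
		DashboardId:   aws.String("123123123"),
		VersionNumber: aws.Int64(version), // optional "version-number" query-string parameter
	})
	_, err := req.Send(ctx)
	return err
}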
+func (s DeleteDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteDataSetOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + Arn *string `type:"string"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + DataSetId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDeleteDataSet = "DeleteDataSet" + +// DeleteDataSetRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Deletes a dataset. +// +// CLI syntax: +// +// aws quicksight delete-data-set \ +// +// --aws-account-id=111111111111 \ +// +// --data-set-id=unique-data-set-id +// +// // Example sending a request using DeleteDataSetRequest. +// req := client.DeleteDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSet +func (c *Client) DeleteDataSetRequest(input *DeleteDataSetInput) DeleteDataSetRequest { + op := &aws.Operation{ + Name: opDeleteDataSet, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + } + + if input == nil { + input = &DeleteDataSetInput{} + } + + req := c.newRequest(op, input, &DeleteDataSetOutput{}) + return DeleteDataSetRequest{Request: req, Input: input, Copy: c.DeleteDataSetRequest} +} + +// DeleteDataSetRequest is the request type for the +// DeleteDataSet API operation. +type DeleteDataSetRequest struct { + *aws.Request + Input *DeleteDataSetInput + Copy func(*DeleteDataSetInput) DeleteDataSetRequest +} + +// Send marshals and sends the DeleteDataSet API request. 
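// Editorial sketch (not part of the generated diff) for the DeleteDataSet operation
// documented above, assuming a client built as in the earlier sketch. Both parameters
// are required and are checked client-side by the generated Validate method.
func deleteDataSet(ctx context.Context, client *quicksight.Client, accountID, dataSetID string) error {
	req := client.DeleteDataSetRequest(&quicksight.DeleteDataSetInput{
		AwsAccountId: aws.String(accountID),
		DataSetId:    aws.String(dataSetID),
	})
	_, err := req.Send(ctx)
	return err
}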
+func (r DeleteDataSetRequest) Send(ctx context.Context) (*DeleteDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteDataSetResponse{ + DeleteDataSetOutput: r.Request.Data.(*DeleteDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteDataSetResponse is the response type for the +// DeleteDataSet API operation. +type DeleteDataSetResponse struct { + *DeleteDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteDataSet request. +func (r *DeleteDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteDataSource.go b/service/quicksight/api_op_DeleteDataSource.go new file mode 100644 index 00000000000..0f4a70cdc36 --- /dev/null +++ b/service/quicksight/api_op_DeleteDataSource.go @@ -0,0 +1,192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteDataSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataSourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteDataSourceInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDataSourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data source you deleted. + Arn *string `type:"string"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDataSourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDeleteDataSource = "DeleteDataSource" + +// DeleteDataSourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Deletes the data source permanently. This action breaks all the datasets +// that reference the deleted data source. +// +// CLI syntax: +// +// aws quicksight delete-data-source \ +// +// --aws-account-id=111122223333 \ +// +// --data-source-id=unique-data-source-id +// +// // Example sending a request using DeleteDataSourceRequest. +// req := client.DeleteDataSourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSource +func (c *Client) DeleteDataSourceRequest(input *DeleteDataSourceInput) DeleteDataSourceRequest { + op := &aws.Operation{ + Name: opDeleteDataSource, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + } + + if input == nil { + input = &DeleteDataSourceInput{} + } + + req := c.newRequest(op, input, &DeleteDataSourceOutput{}) + return DeleteDataSourceRequest{Request: req, Input: input, Copy: c.DeleteDataSourceRequest} +} + +// DeleteDataSourceRequest is the request type for the +// DeleteDataSource API operation. +type DeleteDataSourceRequest struct { + *aws.Request + Input *DeleteDataSourceInput + Copy func(*DeleteDataSourceInput) DeleteDataSourceRequest +} + +// Send marshals and sends the DeleteDataSource API request. +func (r DeleteDataSourceRequest) Send(ctx context.Context) (*DeleteDataSourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteDataSourceResponse{ + DeleteDataSourceOutput: r.Request.Data.(*DeleteDataSourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteDataSourceResponse is the response type for the +// DeleteDataSource API operation. +type DeleteDataSourceResponse struct { + *DeleteDataSourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteDataSource request. 
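// Editorial sketch (not part of the generated diff) for the DeleteDataSource operation
// documented above, assuming a client built as in the earlier sketch. As the doc
// comment notes, this permanently deletes the data source and breaks any datasets
// that still reference it.
func deleteDataSource(ctx context.Context, client *quicksight.Client, accountID, dataSourceID string) error {
	req := client.DeleteDataSourceRequest(&quicksight.DeleteDataSourceInput{
		AwsAccountId: aws.String(accountID),
		DataSourceId: aws.String(dataSourceID),
	})
	_, err := req.Send(ctx)
	return err
}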
+func (r *DeleteDataSourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteIAMPolicyAssignment.go b/service/quicksight/api_op_DeleteIAMPolicyAssignment.go new file mode 100644 index 00000000000..717df4cf05f --- /dev/null +++ b/service/quicksight/api_op_DeleteIAMPolicyAssignment.go @@ -0,0 +1,197 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. + // + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` + + // The AWS account ID where you want to delete an IAM policy assignment. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The namespace that contains the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIAMPolicyAssignmentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIAMPolicyAssignmentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteIAMPolicyAssignmentInput"} + + if s.AssignmentName == nil { + invalidParams.Add(aws.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AssignmentName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteIAMPolicyAssignmentInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DeleteIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. + AssignmentName *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteIAMPolicyAssignmentOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteIAMPolicyAssignmentOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDeleteIAMPolicyAssignment = "DeleteIAMPolicyAssignment" + +// DeleteIAMPolicyAssignmentRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Deletes an existing assignment. +// +// CLI syntax: +// +// aws quicksight delete-iam-policy-assignment --aws-account-id=111122223333 +// --assignment-name=testtest --region=us-east-1 --namespace=default +// +// // Example sending a request using DeleteIAMPolicyAssignmentRequest. +// req := client.DeleteIAMPolicyAssignmentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteIAMPolicyAssignment +func (c *Client) DeleteIAMPolicyAssignmentRequest(input *DeleteIAMPolicyAssignmentInput) DeleteIAMPolicyAssignmentRequest { + op := &aws.Operation{ + Name: opDeleteIAMPolicyAssignment, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/namespace/{Namespace}/iam-policy-assignments/{AssignmentName}", + } + + if input == nil { + input = &DeleteIAMPolicyAssignmentInput{} + } + + req := c.newRequest(op, input, &DeleteIAMPolicyAssignmentOutput{}) + return DeleteIAMPolicyAssignmentRequest{Request: req, Input: input, Copy: c.DeleteIAMPolicyAssignmentRequest} +} + +// DeleteIAMPolicyAssignmentRequest is the request type for the +// DeleteIAMPolicyAssignment API operation. +type DeleteIAMPolicyAssignmentRequest struct { + *aws.Request + Input *DeleteIAMPolicyAssignmentInput + Copy func(*DeleteIAMPolicyAssignmentInput) DeleteIAMPolicyAssignmentRequest +} + +// Send marshals and sends the DeleteIAMPolicyAssignment API request. +func (r DeleteIAMPolicyAssignmentRequest) Send(ctx context.Context) (*DeleteIAMPolicyAssignmentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteIAMPolicyAssignmentResponse{ + DeleteIAMPolicyAssignmentOutput: r.Request.Data.(*DeleteIAMPolicyAssignmentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteIAMPolicyAssignmentResponse is the response type for the +// DeleteIAMPolicyAssignment API operation. +type DeleteIAMPolicyAssignmentResponse struct { + *DeleteIAMPolicyAssignmentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteIAMPolicyAssignment request. 
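// Editorial sketch (not part of the generated diff) for the DeleteIAMPolicyAssignment
// operation documented above, assuming a client built as in the earlier sketch. The
// "default" namespace mirrors the CLI example in the doc comment.
func deleteIAMPolicyAssignment(ctx context.Context, client *quicksight.Client, accountID, assignmentName string) error {
	req := client.DeleteIAMPolicyAssignmentRequest(&quicksight.DeleteIAMPolicyAssignmentInput{
		AwsAccountId:   aws.String(accountID),
		AssignmentName: aws.String(assignmentName),
		Namespace:      aws.String("default"),
	})
	_, err := req.Send(ctx)
	return err
}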
+func (r *DeleteIAMPolicyAssignmentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteTemplate.go b/service/quicksight/api_op_DeleteTemplate.go new file mode 100644 index 00000000000..c9c32bd5c2d --- /dev/null +++ b/service/quicksight/api_op_DeleteTemplate.go @@ -0,0 +1,220 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteTemplateInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the template you are deleting. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the template you want to delete. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // The version number + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` +} + +// String returns the string representation +func (s DeleteTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteTemplateInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("VersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "version-number", protocol.Int64Value(v), metadata) + } + return nil +} + +type DeleteTemplateOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource. + Arn *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // An ID for the template. 
+ TemplateId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteTemplateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s DeleteTemplateOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.Arn != nil {
+ v := *s.Arn
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.RequestId != nil {
+ v := *s.RequestId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.TemplateId != nil {
+ v := *s.TemplateId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ // ignoring invalid encode state, StatusCode. Status
+ return nil
+}
+
+const opDeleteTemplate = "DeleteTemplate"
+
+// DeleteTemplateRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Deletes a template.
+//
+// CLI syntax:
+//
+// * aws quicksight delete-template --aws-account-id 111122223333 --template-id
+// reports_test_template --version-number 2
+//
+// * aws quicksight delete-template --aws-account-id 111122223333 --template-id
+// reports_test_template --alias-name STAGING
+//
+// * aws quicksight delete-template --aws-account-id 111122223333 --template-id
+// reports_test_template --alias-name '\$LATEST'
+//
+// * aws quicksight delete-template --aws-account-id 111122223333 --template-id
+// reports_test_template
+//
+// The version number is optional. If it is not passed, the template (including
+// all of its versions) is deleted; if a version number is provided, only that
+// specific template version is deleted.
+//
+// Users can explicitly describe the latest version of the template by passing
+// $LATEST to the alias-name parameter. $LATEST is an internally supported alias,
+// which points to the latest version of the template.
+//
+// // Example sending a request using DeleteTemplateRequest.
+// req := client.DeleteTemplateRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplate
+func (c *Client) DeleteTemplateRequest(input *DeleteTemplateInput) DeleteTemplateRequest {
+ op := &aws.Operation{
+ Name: opDeleteTemplate,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}",
+ }
+
+ if input == nil {
+ input = &DeleteTemplateInput{}
+ }
+
+ req := c.newRequest(op, input, &DeleteTemplateOutput{})
+ return DeleteTemplateRequest{Request: req, Input: input, Copy: c.DeleteTemplateRequest}
+}
+
+// DeleteTemplateRequest is the request type for the
+// DeleteTemplate API operation.
+type DeleteTemplateRequest struct {
+ *aws.Request
+ Input *DeleteTemplateInput
+ Copy func(*DeleteTemplateInput) DeleteTemplateRequest
+}
+
+// Send marshals and sends the DeleteTemplate API request.
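// Editorial sketch (not part of the generated diff) for the DeleteTemplate operation
// documented above, assuming a client built as in the earlier sketch. A nil version
// deletes the template and all of its versions; a non-nil version (for example
// aws.Int64(2)) deletes only that version.
func deleteTemplate(ctx context.Context, client *quicksight.Client, accountID, templateID string, version *int64) error {
	req := client.DeleteTemplateRequest(&quicksight.DeleteTemplateInput{
		AwsAccountId:  aws.String(accountID),
		TemplateId:    aws.String(templateID),
		VersionNumber: version, // optional "version-number" query-string parameter
	})
	_, err := req.Send(ctx)
	return err
}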
+func (r DeleteTemplateRequest) Send(ctx context.Context) (*DeleteTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteTemplateResponse{ + DeleteTemplateOutput: r.Request.Data.(*DeleteTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteTemplateResponse is the response type for the +// DeleteTemplate API operation. +type DeleteTemplateResponse struct { + *DeleteTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteTemplate request. +func (r *DeleteTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteTemplateAlias.go b/service/quicksight/api_op_DeleteTemplateAlias.go new file mode 100644 index 00000000000..d0e8fc549e1 --- /dev/null +++ b/service/quicksight/api_op_DeleteTemplateAlias.go @@ -0,0 +1,220 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DeleteTemplateAliasInput struct { + _ struct{} `type:"structure"` + + // The alias of the template. If alias-name is provided, the version that the + // alias-name points to is deleted. Alias names that start with $ are reserved + // by QuickSight and can't be deleted.” + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // AWS account ID that contains the template alias you are deleting. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTemplateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTemplateAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteTemplateAliasInput"} + + if s.AliasName == nil { + invalidParams.Add(aws.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteTemplateAliasInput) MarshalFields(e protocol.FieldEncoder) error {
+ e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+ if s.AliasName != nil {
+ v := *s.AliasName
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.AwsAccountId != nil {
+ v := *s.AwsAccountId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.TemplateId != nil {
+ v := *s.TemplateId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+type DeleteTemplateAliasOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the alias.
+ AliasName *string `min:"1" type:"string"`
+
+ // The ARN of the resource.
+ Arn *string `type:"string"`
+
+ // The AWS request ID for this operation.
+ RequestId *string `type:"string"`
+
+ // The http status of the request.
+ Status *int64 `location:"statusCode" type:"integer"`
+
+ // An ID for the template.
+ TemplateId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteTemplateAliasOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s DeleteTemplateAliasOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.AliasName != nil {
+ v := *s.AliasName
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.Arn != nil {
+ v := *s.Arn
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.RequestId != nil {
+ v := *s.RequestId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ if s.TemplateId != nil {
+ v := *s.TemplateId
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ // ignoring invalid encode state, StatusCode. Status
+ return nil
+}
+
+const opDeleteTemplateAlias = "DeleteTemplateAlias"
+
+// DeleteTemplateAliasRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Deletes the alias of a template.
+//
+// CLI syntax:
+//
+// aws quicksight delete-template-alias --aws-account-id 111122223333 --template-id
+// 'reports_test_template' --alias-name 'STAGING'
+//
+// // Example sending a request using DeleteTemplateAliasRequest.
+// req := client.DeleteTemplateAliasRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplateAlias +func (c *Client) DeleteTemplateAliasRequest(input *DeleteTemplateAliasInput) DeleteTemplateAliasRequest { + op := &aws.Operation{ + Name: opDeleteTemplateAlias, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + } + + if input == nil { + input = &DeleteTemplateAliasInput{} + } + + req := c.newRequest(op, input, &DeleteTemplateAliasOutput{}) + return DeleteTemplateAliasRequest{Request: req, Input: input, Copy: c.DeleteTemplateAliasRequest} +} + +// DeleteTemplateAliasRequest is the request type for the +// DeleteTemplateAlias API operation. +type DeleteTemplateAliasRequest struct { + *aws.Request + Input *DeleteTemplateAliasInput + Copy func(*DeleteTemplateAliasInput) DeleteTemplateAliasRequest +} + +// Send marshals and sends the DeleteTemplateAlias API request. +func (r DeleteTemplateAliasRequest) Send(ctx context.Context) (*DeleteTemplateAliasResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteTemplateAliasResponse{ + DeleteTemplateAliasOutput: r.Request.Data.(*DeleteTemplateAliasOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteTemplateAliasResponse is the response type for the +// DeleteTemplateAlias API operation. +type DeleteTemplateAliasResponse struct { + *DeleteTemplateAliasOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteTemplateAlias request. +func (r *DeleteTemplateAliasResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DeleteUser.go b/service/quicksight/api_op_DeleteUser.go index 3d5e74ec643..8d9cedbce39 100644 --- a/service/quicksight/api_op_DeleteUser.go +++ b/service/quicksight/api_op_DeleteUser.go @@ -124,8 +124,6 @@ const opDeleteUser = "DeleteUser" // the AWS Identity and Access Management (IAM) user or role that's making the // call. The IAM user isn't deleted as a result of this call. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// // CLI Sample: // // aws quicksight delete-user --aws-account-id=111122223333 --namespace=default diff --git a/service/quicksight/api_op_DeleteUserByPrincipalId.go b/service/quicksight/api_op_DeleteUserByPrincipalId.go index ff5ff4dd5ad..ff96bb7a1b8 100644 --- a/service/quicksight/api_op_DeleteUserByPrincipalId.go +++ b/service/quicksight/api_op_DeleteUserByPrincipalId.go @@ -119,8 +119,6 @@ const opDeleteUserByPrincipalId = "DeleteUserByPrincipalId" // // Deletes a user identified by its principal ID. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// // CLI Sample: // // aws quicksight delete-user-by-principal-id --aws-account-id=111122223333 diff --git a/service/quicksight/api_op_DescribeDashboard.go b/service/quicksight/api_op_DescribeDashboard.go new file mode 100644 index 00000000000..37880cbc29f --- /dev/null +++ b/service/quicksight/api_op_DescribeDashboard.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDashboardInput struct { + _ struct{} `type:"structure"` + + // The alias name. + AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` + + // AWS account ID that contains the dashboard you are describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The version number for the dashboard. If version number isn’t passed the + // latest published dashboard version is described. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` +} + +// String returns the string representation +func (s DescribeDashboardInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDashboardInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDashboardInput"} + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("VersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDashboardInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AliasName != nil { + v := *s.AliasName + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "alias-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "version-number", protocol.Int64Value(v), metadata) + } + return nil +} + +type DescribeDashboardOutput struct { + _ struct{} `type:"structure"` + + // Information about the dashboard. + Dashboard *Dashboard `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of this request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDashboardOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDashboardOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Dashboard != nil { + v := s.Dashboard + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Dashboard", v, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeDashboard = "DescribeDashboard" + +// DescribeDashboardRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Provides a summary for a dashboard. +// +// CLI syntax: +// +// * aws quicksight describe-dashboard --aws-account-id 111122223333 —dashboard-id +// reports_test_report -version-number 2 +// +// * aws quicksight describe-dashboard --aws-account-id 111122223333 —dashboard-id +// reports_test_report -alias-name ‘$PUBLISHED’ +// +// // Example sending a request using DescribeDashboardRequest. +// req := client.DescribeDashboardRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboard +func (c *Client) DescribeDashboardRequest(input *DescribeDashboardInput) DescribeDashboardRequest { + op := &aws.Operation{ + Name: opDescribeDashboard, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + } + + if input == nil { + input = &DescribeDashboardInput{} + } + + req := c.newRequest(op, input, &DescribeDashboardOutput{}) + return DescribeDashboardRequest{Request: req, Input: input, Copy: c.DescribeDashboardRequest} +} + +// DescribeDashboardRequest is the request type for the +// DescribeDashboard API operation. +type DescribeDashboardRequest struct { + *aws.Request + Input *DescribeDashboardInput + Copy func(*DescribeDashboardInput) DescribeDashboardRequest +} + +// Send marshals and sends the DescribeDashboard API request. +func (r DescribeDashboardRequest) Send(ctx context.Context) (*DescribeDashboardResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDashboardResponse{ + DescribeDashboardOutput: r.Request.Data.(*DescribeDashboardOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDashboardResponse is the response type for the +// DescribeDashboard API operation. +type DescribeDashboardResponse struct { + *DescribeDashboardOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDashboard request. +func (r *DescribeDashboardResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeDashboardPermissions.go b/service/quicksight/api_op_DescribeDashboardPermissions.go new file mode 100644 index 00000000000..911b30bd0e5 --- /dev/null +++ b/service/quicksight/api_op_DescribeDashboardPermissions.go @@ -0,0 +1,207 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDashboardPermissionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboard you are describing permissions + // of. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard, also added to IAM policy. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDashboardPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDashboardPermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDashboardPermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDashboardPermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeDashboardPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dashboard. + DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // A structure that contains the permissions of the dashboard. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDashboardPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
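// Editorial sketch (not part of the generated diff) for the DescribeDashboard
// operation from the preceding file, assuming a client built as in the earlier
// sketch. Either a version number or an alias such as "$PUBLISHED" can be supplied
// as a query-string parameter; here the published version is described.
func describePublishedDashboard(ctx context.Context, client *quicksight.Client, accountID, dashboardID string) (*quicksight.Dashboard, error) {
	req := client.DescribeDashboardRequest(&quicksight.DescribeDashboardInput{
		AwsAccountId: aws.String(accountID),
		DashboardId:  aws.String(dashboardID),
		AliasName:    aws.String("$PUBLISHED"),
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return nil, err
	}
	return resp.Dashboard, nil
}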
+func (s DescribeDashboardPermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DashboardArn != nil { + v := *s.DashboardArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeDashboardPermissions = "DescribeDashboardPermissions" + +// DescribeDashboardPermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes read and write permissions on a dashboard. +// +// CLI syntax: +// +// aws quicksight describe-dashboard-permissions --aws-account-id 735340738645 +// —dashboard-id reports_test_bob_report +// +// // Example sending a request using DescribeDashboardPermissionsRequest. +// req := client.DescribeDashboardPermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboardPermissions +func (c *Client) DescribeDashboardPermissionsRequest(input *DescribeDashboardPermissionsInput) DescribeDashboardPermissionsRequest { + op := &aws.Operation{ + Name: opDescribeDashboardPermissions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", + } + + if input == nil { + input = &DescribeDashboardPermissionsInput{} + } + + req := c.newRequest(op, input, &DescribeDashboardPermissionsOutput{}) + return DescribeDashboardPermissionsRequest{Request: req, Input: input, Copy: c.DescribeDashboardPermissionsRequest} +} + +// DescribeDashboardPermissionsRequest is the request type for the +// DescribeDashboardPermissions API operation. +type DescribeDashboardPermissionsRequest struct { + *aws.Request + Input *DescribeDashboardPermissionsInput + Copy func(*DescribeDashboardPermissionsInput) DescribeDashboardPermissionsRequest +} + +// Send marshals and sends the DescribeDashboardPermissions API request. +func (r DescribeDashboardPermissionsRequest) Send(ctx context.Context) (*DescribeDashboardPermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDashboardPermissionsResponse{ + DescribeDashboardPermissionsOutput: r.Request.Data.(*DescribeDashboardPermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDashboardPermissionsResponse is the response type for the +// DescribeDashboardPermissions API operation. +type DescribeDashboardPermissionsResponse struct { + *DescribeDashboardPermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDashboardPermissions request. 
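// Editorial sketch (not part of the generated diff) for the DescribeDashboardPermissions
// operation documented above, assuming a client built as in the earlier sketch. The
// response carries a list of ResourcePermission values that can be inspected directly.
func printDashboardPermissions(ctx context.Context, client *quicksight.Client, accountID, dashboardID string) error {
	req := client.DescribeDashboardPermissionsRequest(&quicksight.DescribeDashboardPermissionsInput{
		AwsAccountId: aws.String(accountID),
		DashboardId:  aws.String(dashboardID),
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	for _, p := range resp.Permissions {
		fmt.Printf("%v\n", p) // each entry is a quicksight.ResourcePermission
	}
	return nil
}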
+func (r *DescribeDashboardPermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeDataSet.go b/service/quicksight/api_op_DescribeDataSet.go new file mode 100644 index 00000000000..73872eb3a23 --- /dev/null +++ b/service/quicksight/api_op_DescribeDataSet.go @@ -0,0 +1,183 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDataSetInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDataSetInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeDataSetOutput struct { + _ struct{} `type:"structure"` + + // Information on the dataset. + DataSet *DataSet `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSet != nil { + v := s.DataSet + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSet", v, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. 
Status + return nil +} + +const opDescribeDataSet = "DescribeDataSet" + +// DescribeDataSetRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes a dataset. +// +// CLI syntax: +// +// aws quicksight describe-data-set \ +// +// --aws-account-id=111111111111 \ +// +// --data-set-id=unique-data-set-id +// +// // Example sending a request using DescribeDataSetRequest. +// req := client.DescribeDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSet +func (c *Client) DescribeDataSetRequest(input *DescribeDataSetInput) DescribeDataSetRequest { + op := &aws.Operation{ + Name: opDescribeDataSet, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + } + + if input == nil { + input = &DescribeDataSetInput{} + } + + req := c.newRequest(op, input, &DescribeDataSetOutput{}) + return DescribeDataSetRequest{Request: req, Input: input, Copy: c.DescribeDataSetRequest} +} + +// DescribeDataSetRequest is the request type for the +// DescribeDataSet API operation. +type DescribeDataSetRequest struct { + *aws.Request + Input *DescribeDataSetInput + Copy func(*DescribeDataSetInput) DescribeDataSetRequest +} + +// Send marshals and sends the DescribeDataSet API request. +func (r DescribeDataSetRequest) Send(ctx context.Context) (*DescribeDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDataSetResponse{ + DescribeDataSetOutput: r.Request.Data.(*DescribeDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDataSetResponse is the response type for the +// DescribeDataSet API operation. +type DescribeDataSetResponse struct { + *DescribeDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDataSet request. +func (r *DescribeDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeDataSetPermissions.go b/service/quicksight/api_op_DescribeDataSetPermissions.go new file mode 100644 index 00000000000..fe4d0e6a8c6 --- /dev/null +++ b/service/quicksight/api_op_DescribeDataSetPermissions.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDataSetPermissionsInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDataSetPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDataSetPermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDataSetPermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSetPermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeDataSetPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + DataSetArn *string `type:"string"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + DataSetId *string `type:"string"` + + // A list of resource permissions on the dataset. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDataSetPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSetPermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSetArn != nil { + v := *s.DataSetArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeDataSetPermissions = "DescribeDataSetPermissions" + +// DescribeDataSetPermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes the permissions on a dataset. 
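+//
+// The following is an illustrative, self-contained sketch (editorial addition,
+// not generated documentation) that fills in the request example shown further
+// below. It assumes the aws/external config loader shipped with this SDK
+// version; the account and dataset IDs are placeholders.
+//
+//    package main
+//
+//    import (
+//        "context"
+//        "fmt"
+//
+//        "github.com/aws/aws-sdk-go-v2/aws"
+//        "github.com/aws/aws-sdk-go-v2/aws/external"
+//        "github.com/aws/aws-sdk-go-v2/service/quicksight"
+//    )
+//
+//    func main() {
+//        // Load credentials and region from the shared config/environment.
+//        cfg, err := external.LoadDefaultAWSConfig()
+//        if err != nil {
+//            panic(err)
+//        }
+//        client := quicksight.New(cfg)
+//
+//        req := client.DescribeDataSetPermissionsRequest(&quicksight.DescribeDataSetPermissionsInput{
+//            AwsAccountId: aws.String("111122223333"),       // placeholder account ID
+//            DataSetId:    aws.String("unique-data-set-id"), // placeholder dataset ID
+//        })
+//        resp, err := req.Send(context.TODO())
+//        if err != nil {
+//            panic(err)
+//        }
+//        // The response carries DataSetArn, Permissions, RequestId, and Status.
+//        fmt.Println(resp)
+//    }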
+// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id +// +// CLI syntax: +// +// aws quicksight describe-data-set-permissions \ +// +// --aws-account-id=111122223333 \ +// +// --data-set-id=unique-data-set-id \ +// +// // Example sending a request using DescribeDataSetPermissionsRequest. +// req := client.DescribeDataSetPermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetPermissions +func (c *Client) DescribeDataSetPermissionsRequest(input *DescribeDataSetPermissionsInput) DescribeDataSetPermissionsRequest { + op := &aws.Operation{ + Name: opDescribeDataSetPermissions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", + } + + if input == nil { + input = &DescribeDataSetPermissionsInput{} + } + + req := c.newRequest(op, input, &DescribeDataSetPermissionsOutput{}) + return DescribeDataSetPermissionsRequest{Request: req, Input: input, Copy: c.DescribeDataSetPermissionsRequest} +} + +// DescribeDataSetPermissionsRequest is the request type for the +// DescribeDataSetPermissions API operation. +type DescribeDataSetPermissionsRequest struct { + *aws.Request + Input *DescribeDataSetPermissionsInput + Copy func(*DescribeDataSetPermissionsInput) DescribeDataSetPermissionsRequest +} + +// Send marshals and sends the DescribeDataSetPermissions API request. +func (r DescribeDataSetPermissionsRequest) Send(ctx context.Context) (*DescribeDataSetPermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDataSetPermissionsResponse{ + DescribeDataSetPermissionsOutput: r.Request.Data.(*DescribeDataSetPermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDataSetPermissionsResponse is the response type for the +// DescribeDataSetPermissions API operation. +type DescribeDataSetPermissionsResponse struct { + *DescribeDataSetPermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDataSetPermissions request. +func (r *DescribeDataSetPermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeDataSource.go b/service/quicksight/api_op_DescribeDataSource.go new file mode 100644 index 00000000000..1bd003b50d0 --- /dev/null +++ b/service/quicksight/api_op_DescribeDataSource.go @@ -0,0 +1,176 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDataSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. 
+ // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataSourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDataSourceInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The information on the data source. + DataSource *DataSource `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSource != nil { + v := s.DataSource + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSource", v, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeDataSource = "DescribeDataSource" + +// DescribeDataSourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes a data source. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id +// +// // Example sending a request using DescribeDataSourceRequest. 
+// req := client.DescribeDataSourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource +func (c *Client) DescribeDataSourceRequest(input *DescribeDataSourceInput) DescribeDataSourceRequest { + op := &aws.Operation{ + Name: opDescribeDataSource, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + } + + if input == nil { + input = &DescribeDataSourceInput{} + } + + req := c.newRequest(op, input, &DescribeDataSourceOutput{}) + return DescribeDataSourceRequest{Request: req, Input: input, Copy: c.DescribeDataSourceRequest} +} + +// DescribeDataSourceRequest is the request type for the +// DescribeDataSource API operation. +type DescribeDataSourceRequest struct { + *aws.Request + Input *DescribeDataSourceInput + Copy func(*DescribeDataSourceInput) DescribeDataSourceRequest +} + +// Send marshals and sends the DescribeDataSource API request. +func (r DescribeDataSourceRequest) Send(ctx context.Context) (*DescribeDataSourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDataSourceResponse{ + DescribeDataSourceOutput: r.Request.Data.(*DescribeDataSourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDataSourceResponse is the response type for the +// DescribeDataSource API operation. +type DescribeDataSourceResponse struct { + *DescribeDataSourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDataSource request. +func (r *DescribeDataSourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeDataSourcePermissions.go b/service/quicksight/api_op_DescribeDataSourcePermissions.go new file mode 100644 index 00000000000..26a9a447118 --- /dev/null +++ b/service/quicksight/api_op_DescribeDataSourcePermissions.go @@ -0,0 +1,200 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeDataSourcePermissionsInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDataSourcePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDataSourcePermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeDataSourcePermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSourcePermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeDataSourcePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data source. + DataSourceArn *string `type:"string"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // A list of resource permissions on the data source. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDataSourcePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeDataSourcePermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSourceArn != nil { + v := *s.DataSourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeDataSourcePermissions = "DescribeDataSourcePermissions" + +// DescribeDataSourcePermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes the resource permissions for a data source. 
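+//
+// Illustrative sketch (editorial addition), reusing the client setup from the
+// DescribeDataSetPermissions example; the data source ID is a placeholder:
+//
+//    req := client.DescribeDataSourcePermissionsRequest(&quicksight.DescribeDataSourcePermissionsInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        DataSourceId: aws.String("my-data-source-id"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        panic(err)
+//    }
+//    for _, p := range resp.Permissions { // []ResourcePermission granted on the data source
+//        fmt.Println(p)
+//    }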
+// +// The permissions resource is aws:quicksight:region:aws-account-id:datasource/data-source-id +// +// // Example sending a request using DescribeDataSourcePermissionsRequest. +// req := client.DescribeDataSourcePermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions +func (c *Client) DescribeDataSourcePermissionsRequest(input *DescribeDataSourcePermissionsInput) DescribeDataSourcePermissionsRequest { + op := &aws.Operation{ + Name: opDescribeDataSourcePermissions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + } + + if input == nil { + input = &DescribeDataSourcePermissionsInput{} + } + + req := c.newRequest(op, input, &DescribeDataSourcePermissionsOutput{}) + return DescribeDataSourcePermissionsRequest{Request: req, Input: input, Copy: c.DescribeDataSourcePermissionsRequest} +} + +// DescribeDataSourcePermissionsRequest is the request type for the +// DescribeDataSourcePermissions API operation. +type DescribeDataSourcePermissionsRequest struct { + *aws.Request + Input *DescribeDataSourcePermissionsInput + Copy func(*DescribeDataSourcePermissionsInput) DescribeDataSourcePermissionsRequest +} + +// Send marshals and sends the DescribeDataSourcePermissions API request. +func (r DescribeDataSourcePermissionsRequest) Send(ctx context.Context) (*DescribeDataSourcePermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeDataSourcePermissionsResponse{ + DescribeDataSourcePermissionsOutput: r.Request.Data.(*DescribeDataSourcePermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeDataSourcePermissionsResponse is the response type for the +// DescribeDataSourcePermissions API operation. +type DescribeDataSourcePermissionsResponse struct { + *DescribeDataSourcePermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeDataSourcePermissions request. +func (r *DescribeDataSourcePermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeIAMPolicyAssignment.go b/service/quicksight/api_op_DescribeIAMPolicyAssignment.go new file mode 100644 index 00000000000..709b90bc96c --- /dev/null +++ b/service/quicksight/api_op_DescribeIAMPolicyAssignment.go @@ -0,0 +1,197 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. + // + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` + + // The AWS account ID that contains the assignment you want to describe. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The namespace that contains the assignment. 
+ // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIAMPolicyAssignmentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIAMPolicyAssignmentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeIAMPolicyAssignmentInput"} + + if s.AssignmentName == nil { + invalidParams.Add(aws.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AssignmentName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeIAMPolicyAssignmentInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` + + // Information describing the IAM policy assignment. + IAMPolicyAssignment *IAMPolicyAssignment `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeIAMPolicyAssignmentOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeIAMPolicyAssignmentOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.IAMPolicyAssignment != nil { + v := s.IAMPolicyAssignment + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "IAMPolicyAssignment", v, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeIAMPolicyAssignment = "DescribeIAMPolicyAssignment" + +// DescribeIAMPolicyAssignmentRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes an existing IAMPolicy Assignment by specified assignment name. 
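+//
+// Illustrative sketch (editorial addition), reusing the client setup from the
+// DescribeDataSetPermissions example; the values mirror the CLI example below:
+//
+//    req := client.DescribeIAMPolicyAssignmentRequest(&quicksight.DescribeIAMPolicyAssignmentInput{
+//        AwsAccountId:   aws.String("111122223333"),
+//        AssignmentName: aws.String("testtest"),
+//        Namespace:      aws.String("default"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        panic(err)
+//    }
+//    fmt.Println(resp.IAMPolicyAssignment) // assignment details, plus RequestId and Status on resp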
+// +// CLI syntax: +// +// aws quicksight describe-iam-policy-assignment --aws-account-id=111122223333 +// --assignment-name=testtest --namespace=default --region=us-east-1 +// +// // Example sending a request using DescribeIAMPolicyAssignmentRequest. +// req := client.DescribeIAMPolicyAssignmentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIAMPolicyAssignment +func (c *Client) DescribeIAMPolicyAssignmentRequest(input *DescribeIAMPolicyAssignmentInput) DescribeIAMPolicyAssignmentRequest { + op := &aws.Operation{ + Name: opDescribeIAMPolicyAssignment, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", + } + + if input == nil { + input = &DescribeIAMPolicyAssignmentInput{} + } + + req := c.newRequest(op, input, &DescribeIAMPolicyAssignmentOutput{}) + return DescribeIAMPolicyAssignmentRequest{Request: req, Input: input, Copy: c.DescribeIAMPolicyAssignmentRequest} +} + +// DescribeIAMPolicyAssignmentRequest is the request type for the +// DescribeIAMPolicyAssignment API operation. +type DescribeIAMPolicyAssignmentRequest struct { + *aws.Request + Input *DescribeIAMPolicyAssignmentInput + Copy func(*DescribeIAMPolicyAssignmentInput) DescribeIAMPolicyAssignmentRequest +} + +// Send marshals and sends the DescribeIAMPolicyAssignment API request. +func (r DescribeIAMPolicyAssignmentRequest) Send(ctx context.Context) (*DescribeIAMPolicyAssignmentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeIAMPolicyAssignmentResponse{ + DescribeIAMPolicyAssignmentOutput: r.Request.Data.(*DescribeIAMPolicyAssignmentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeIAMPolicyAssignmentResponse is the response type for the +// DescribeIAMPolicyAssignment API operation. +type DescribeIAMPolicyAssignmentResponse struct { + *DescribeIAMPolicyAssignmentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeIAMPolicyAssignment request. +func (r *DescribeIAMPolicyAssignmentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeIngestion.go b/service/quicksight/api_op_DescribeIngestion.go new file mode 100644 index 00000000000..572ec37cff3 --- /dev/null +++ b/service/quicksight/api_op_DescribeIngestion.go @@ -0,0 +1,192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeIngestionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // An ID for the ingestion. 
+ // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeIngestionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIngestionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeIngestionInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + + if s.IngestionId == nil { + invalidParams.Add(aws.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("IngestionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeIngestionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeIngestionOutput struct { + _ struct{} `type:"structure"` + + // Information about the ingestion. + Ingestion *Ingestion `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeIngestionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeIngestionOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Ingestion != nil { + v := s.Ingestion + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Ingestion", v, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeIngestion = "DescribeIngestion" + +// DescribeIngestionRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes a SPICE ingestion. +// +// // Example sending a request using DescribeIngestionRequest. 
+// req := client.DescribeIngestionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIngestion +func (c *Client) DescribeIngestionRequest(input *DescribeIngestionInput) DescribeIngestionRequest { + op := &aws.Operation{ + Name: opDescribeIngestion, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", + } + + if input == nil { + input = &DescribeIngestionInput{} + } + + req := c.newRequest(op, input, &DescribeIngestionOutput{}) + return DescribeIngestionRequest{Request: req, Input: input, Copy: c.DescribeIngestionRequest} +} + +// DescribeIngestionRequest is the request type for the +// DescribeIngestion API operation. +type DescribeIngestionRequest struct { + *aws.Request + Input *DescribeIngestionInput + Copy func(*DescribeIngestionInput) DescribeIngestionRequest +} + +// Send marshals and sends the DescribeIngestion API request. +func (r DescribeIngestionRequest) Send(ctx context.Context) (*DescribeIngestionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeIngestionResponse{ + DescribeIngestionOutput: r.Request.Data.(*DescribeIngestionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeIngestionResponse is the response type for the +// DescribeIngestion API operation. +type DescribeIngestionResponse struct { + *DescribeIngestionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeIngestion request. +func (r *DescribeIngestionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeTemplate.go b/service/quicksight/api_op_DescribeTemplate.go new file mode 100644 index 00000000000..7f730fc93aa --- /dev/null +++ b/service/quicksight/api_op_DescribeTemplate.go @@ -0,0 +1,211 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeTemplateInput struct { + _ struct{} `type:"structure"` + + // This is an optional field, when an alias name is provided, the version referenced + // by the alias is described. Refer to CreateTemplateAlias to create a template + // alias. $PUBLISHED is not supported for template. + AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` + + // AWS account ID that contains the template you are describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // This is an optional field, when a version number is provided the corresponding + // version is describe, if it's not provided the latest version of the template + // is described. 
+ VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` +} + +// String returns the string representation +func (s DescribeTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTemplateInput"} + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("VersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AliasName != nil { + v := *s.AliasName + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "alias-name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "version-number", protocol.Int64Value(v), metadata) + } + return nil +} + +type DescribeTemplateOutput struct { + _ struct{} `type:"structure"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The template structure of the object you want to describe. + Template *Template `type:"structure"` +} + +// String returns the string representation +func (s DescribeTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplateOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Template != nil { + v := s.Template + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Template", v, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeTemplate = "DescribeTemplate" + +// DescribeTemplateRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes a template's metadata. 
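+//
+// Illustrative sketch (editorial addition), reusing the client setup from the
+// DescribeDataSetPermissions example. VersionNumber and AliasName are optional;
+// when both are omitted, the latest template version is described:
+//
+//    req := client.DescribeTemplateRequest(&quicksight.DescribeTemplateInput{
+//        AwsAccountId:  aws.String("111122223333"),
+//        TemplateId:    aws.String("reports_test_template"),
+//        VersionNumber: aws.Int64(2), // optional; pins a specific version
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        panic(err)
+//    }
+//    fmt.Println(resp.Template)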
+// +// CLI syntax: +// +// aws quicksight describe-template --aws-account-id 111122223333 --template-id +// reports_test_template +// +// aws quicksight describe-template --aws-account-id 111122223333 --template-id +// reports_test_template --version-number-2 +// +// aws quicksight describe-template --aws-account-id 111122223333 --template-id +// reports_test_template --alias-name '\$LATEST' +// +// Users can explicitly describe the latest version of the dashboard by passing +// $LATEST to the alias-name parameter. $LATEST is an internally supported alias, +// which points to the latest version of the dashboard. +// +// // Example sending a request using DescribeTemplateRequest. +// req := client.DescribeTemplateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate +func (c *Client) DescribeTemplateRequest(input *DescribeTemplateInput) DescribeTemplateRequest { + op := &aws.Operation{ + Name: opDescribeTemplate, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + } + + if input == nil { + input = &DescribeTemplateInput{} + } + + req := c.newRequest(op, input, &DescribeTemplateOutput{}) + return DescribeTemplateRequest{Request: req, Input: input, Copy: c.DescribeTemplateRequest} +} + +// DescribeTemplateRequest is the request type for the +// DescribeTemplate API operation. +type DescribeTemplateRequest struct { + *aws.Request + Input *DescribeTemplateInput + Copy func(*DescribeTemplateInput) DescribeTemplateRequest +} + +// Send marshals and sends the DescribeTemplate API request. +func (r DescribeTemplateRequest) Send(ctx context.Context) (*DescribeTemplateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTemplateResponse{ + DescribeTemplateOutput: r.Request.Data.(*DescribeTemplateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTemplateResponse is the response type for the +// DescribeTemplate API operation. +type DescribeTemplateResponse struct { + *DescribeTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeTemplate request. +func (r *DescribeTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeTemplateAlias.go b/service/quicksight/api_op_DescribeTemplateAlias.go new file mode 100644 index 00000000000..0059d2b6918 --- /dev/null +++ b/service/quicksight/api_op_DescribeTemplateAlias.go @@ -0,0 +1,200 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeTemplateAliasInput struct { + _ struct{} `type:"structure"` + + // The alias name. $PUBLISHED is not supported for template. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // AWS account ID that contains the template alias you are describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the template. 
+ // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTemplateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTemplateAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTemplateAliasInput"} + + if s.AliasName == nil { + invalidParams.Add(aws.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplateAliasInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AliasName != nil { + v := *s.AliasName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeTemplateAliasOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information about the template alias. + TemplateAlias *TemplateAlias `type:"structure"` +} + +// String returns the string representation +func (s DescribeTemplateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplateAliasOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateAlias != nil { + v := s.TemplateAlias + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TemplateAlias", v, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeTemplateAlias = "DescribeTemplateAlias" + +// DescribeTemplateAliasRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes the template aliases of a template. 
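+//
+// Illustrative sketch (editorial addition), reusing the client setup from the
+// DescribeDataSetPermissions example. Calling Validate first reports the
+// required-field and minimum-length constraints locally, before any request
+// is sent:
+//
+//    input := &quicksight.DescribeTemplateAliasInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        TemplateId:   aws.String("reports_test_template"),
+//        AliasName:    aws.String("STAGING"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        panic(err)
+//    }
+//    resp, err := client.DescribeTemplateAliasRequest(input).Send(context.TODO())
+//    if err != nil {
+//        panic(err)
+//    }
+//    fmt.Println(resp.TemplateAlias)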
+// +// CLI syntax: +// +// aws quicksight describe-template-alias --aws-account-id 111122223333 --template-id +// 'reports_test_template' --alias-name 'STAGING' +// +// // Example sending a request using DescribeTemplateAliasRequest. +// req := client.DescribeTemplateAliasRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias +func (c *Client) DescribeTemplateAliasRequest(input *DescribeTemplateAliasInput) DescribeTemplateAliasRequest { + op := &aws.Operation{ + Name: opDescribeTemplateAlias, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + } + + if input == nil { + input = &DescribeTemplateAliasInput{} + } + + req := c.newRequest(op, input, &DescribeTemplateAliasOutput{}) + return DescribeTemplateAliasRequest{Request: req, Input: input, Copy: c.DescribeTemplateAliasRequest} +} + +// DescribeTemplateAliasRequest is the request type for the +// DescribeTemplateAlias API operation. +type DescribeTemplateAliasRequest struct { + *aws.Request + Input *DescribeTemplateAliasInput + Copy func(*DescribeTemplateAliasInput) DescribeTemplateAliasRequest +} + +// Send marshals and sends the DescribeTemplateAlias API request. +func (r DescribeTemplateAliasRequest) Send(ctx context.Context) (*DescribeTemplateAliasResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTemplateAliasResponse{ + DescribeTemplateAliasOutput: r.Request.Data.(*DescribeTemplateAliasOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTemplateAliasResponse is the response type for the +// DescribeTemplateAlias API operation. +type DescribeTemplateAliasResponse struct { + *DescribeTemplateAliasOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeTemplateAlias request. +func (r *DescribeTemplateAliasResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeTemplatePermissions.go b/service/quicksight/api_op_DescribeTemplatePermissions.go new file mode 100644 index 00000000000..c126fa97a71 --- /dev/null +++ b/service/quicksight/api_op_DescribeTemplatePermissions.go @@ -0,0 +1,206 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type DescribeTemplatePermissionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the template you are describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTemplatePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeTemplatePermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeTemplatePermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplatePermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type DescribeTemplatePermissionsOutput struct { + _ struct{} `type:"structure"` + + // A list of resource permissions to be set on the template. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ARN of the template. + TemplateArn *string `type:"string"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeTemplatePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DescribeTemplatePermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateArn != nil { + v := *s.TemplateArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opDescribeTemplatePermissions = "DescribeTemplatePermissions" + +// DescribeTemplatePermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Describes read and write permissions on a template. 
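+//
+// Illustrative sketch (editorial addition), reusing the client setup from the
+// DescribeDataSetPermissions example:
+//
+//    req := client.DescribeTemplatePermissionsRequest(&quicksight.DescribeTemplatePermissionsInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        TemplateId:   aws.String("reports_test_template"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        panic(err)
+//    }
+//    if resp.TemplateArn != nil {
+//        fmt.Println(*resp.TemplateArn, len(resp.Permissions))
+//    }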
+// +// CLI syntax: +// +// aws quicksight describe-template-permissions —aws-account-id 735340738645 +// —template-id reports_test_template +// +// // Example sending a request using DescribeTemplatePermissionsRequest. +// req := client.DescribeTemplatePermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplatePermissions +func (c *Client) DescribeTemplatePermissionsRequest(input *DescribeTemplatePermissionsInput) DescribeTemplatePermissionsRequest { + op := &aws.Operation{ + Name: opDescribeTemplatePermissions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", + } + + if input == nil { + input = &DescribeTemplatePermissionsInput{} + } + + req := c.newRequest(op, input, &DescribeTemplatePermissionsOutput{}) + return DescribeTemplatePermissionsRequest{Request: req, Input: input, Copy: c.DescribeTemplatePermissionsRequest} +} + +// DescribeTemplatePermissionsRequest is the request type for the +// DescribeTemplatePermissions API operation. +type DescribeTemplatePermissionsRequest struct { + *aws.Request + Input *DescribeTemplatePermissionsInput + Copy func(*DescribeTemplatePermissionsInput) DescribeTemplatePermissionsRequest +} + +// Send marshals and sends the DescribeTemplatePermissions API request. +func (r DescribeTemplatePermissionsRequest) Send(ctx context.Context) (*DescribeTemplatePermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeTemplatePermissionsResponse{ + DescribeTemplatePermissionsOutput: r.Request.Data.(*DescribeTemplatePermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeTemplatePermissionsResponse is the response type for the +// DescribeTemplatePermissions API operation. +type DescribeTemplatePermissionsResponse struct { + *DescribeTemplatePermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeTemplatePermissions request. +func (r *DescribeTemplatePermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_DescribeUser.go b/service/quicksight/api_op_DescribeUser.go index 6b485c9ef80..cefc350e543 100644 --- a/service/quicksight/api_op_DescribeUser.go +++ b/service/quicksight/api_op_DescribeUser.go @@ -131,8 +131,6 @@ const opDescribeUser = "DescribeUser" // // Returns information about a user, given the user name. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// // The response is a user object that contains the user's Amazon Resource Name // (ARN), AWS Identity and Access Management (IAM) role, and email address. 
// diff --git a/service/quicksight/api_op_GetDashboardEmbedUrl.go b/service/quicksight/api_op_GetDashboardEmbedUrl.go index be9d4c394fd..4f2cba07f41 100644 --- a/service/quicksight/api_op_GetDashboardEmbedUrl.go +++ b/service/quicksight/api_op_GetDashboardEmbedUrl.go @@ -21,7 +21,7 @@ type GetDashboardEmbedUrlInput struct { // The ID for the dashboard, also added to IAM policy // // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" type:"string" required:"true"` + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` // The authentication method the user uses to sign in (IAM only). // @@ -41,17 +41,15 @@ type GetDashboardEmbedUrlInput struct { UndoRedoDisabled *bool `location:"querystring" locationName:"undo-redo-disabled" type:"boolean"` // The Amazon QuickSight user's ARN, for use with QUICKSIGHT identity type. - // You can use this for any of the following: + // You can use this for any Amazon QuickSight users in your account (readers, + // authors, or admins) authenticated as one of the following: // - // * Amazon QuickSight users in your account (readers, authors, or admins) - // - // * AD users + // * Active Directory (AD) users or group members // // * Invited non-federated users // - // * Federated IAM users - // - // * Federated IAM role-based sessions + // * IAM users and IAM role-based sessions authenticated through Federated + // Single Sign-On using SAML, OpenID Connect, or IAM Federation UserArn *string `location:"querystring" locationName:"user-arn" type:"string"` } @@ -74,6 +72,9 @@ func (s *GetDashboardEmbedUrlInput) Validate() error { if s.DashboardId == nil { invalidParams.Add(aws.NewErrParamRequired("DashboardId")) } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } if len(s.IdentityType) == 0 { invalidParams.Add(aws.NewErrParamRequired("IdentityType")) } @@ -182,7 +183,7 @@ const opGetDashboardEmbedUrl = "GetDashboardEmbedUrl" // // Generates a server-side embeddable URL and authorization code. Before this // can work properly, first you need to configure the dashboards and user permissions. -// For more information, see Embedding Amazon QuickSight Dashboards (https://docs.aws.amazon.com/en_us/quicksight/latest/user/embedding.html). +// For more information, see Embedding Amazon QuickSight Dashboards (https://docs.aws.example.com/en_us/quicksight/latest/user/embedding.html). // // Currently, you can use GetDashboardEmbedURL only from the server, not from // the user’s browser. @@ -203,11 +204,17 @@ const opGetDashboardEmbedUrl = "GetDashboardEmbedUrl" // --user-role READER --session-name "embeddingsession" --email user123@example.com // --region us-east-1 // -// Get the URL for the embedded dashboard +// Get the URL for the embedded dashboard (IAM identity authentication): // // aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id // 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type IAM // +// Get the URL for the embedded dashboard (QUICKSIGHT identity authentication): +// +// aws quicksight get-dashboard-embed-url --aws-account-id 111122223333 --dashboard-id +// 1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89 --identity-type QUICKSIGHT --user-arn +// arn:aws:quicksight:us-east-1:111122223333:user/default/embedding_quicksight_dashboard_role/embeddingsession +// // // Example sending a request using GetDashboardEmbedUrlRequest. 
// req := client.GetDashboardEmbedUrlRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/quicksight/api_op_ListDashboardVersions.go b/service/quicksight/api_op_ListDashboardVersions.go new file mode 100644 index 00000000000..17a1a3386c6 --- /dev/null +++ b/service/quicksight/api_op_ListDashboardVersions.go @@ -0,0 +1,271 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDashboardVersionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboard you are listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListDashboardVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDashboardVersionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDashboardVersionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListDashboardVersionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDashboardVersionsOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about each version of the dashboard. + DashboardVersionSummaryList []DashboardVersionSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListDashboardVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDashboardVersionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DashboardVersionSummaryList != nil { + v := s.DashboardVersionSummaryList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DashboardVersionSummaryList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListDashboardVersions = "ListDashboardVersions" + +// ListDashboardVersionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists all the versions of the dashboards in the Quicksight subscription. +// +// CLI syntax: +// +// aws quicksight list-template-versions —aws-account-id 111122223333 —template-id +// reports-test-template +// +// // Example sending a request using ListDashboardVersionsRequest. 
+// req := client.ListDashboardVersionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboardVersions +func (c *Client) ListDashboardVersionsRequest(input *ListDashboardVersionsInput) ListDashboardVersionsRequest { + op := &aws.Operation{ + Name: opListDashboardVersions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDashboardVersionsInput{} + } + + req := c.newRequest(op, input, &ListDashboardVersionsOutput{}) + return ListDashboardVersionsRequest{Request: req, Input: input, Copy: c.ListDashboardVersionsRequest} +} + +// ListDashboardVersionsRequest is the request type for the +// ListDashboardVersions API operation. +type ListDashboardVersionsRequest struct { + *aws.Request + Input *ListDashboardVersionsInput + Copy func(*ListDashboardVersionsInput) ListDashboardVersionsRequest +} + +// Send marshals and sends the ListDashboardVersions API request. +func (r ListDashboardVersionsRequest) Send(ctx context.Context) (*ListDashboardVersionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDashboardVersionsResponse{ + ListDashboardVersionsOutput: r.Request.Data.(*ListDashboardVersionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDashboardVersionsRequestPaginator returns a paginator for ListDashboardVersions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDashboardVersionsRequest(input) +// p := quicksight.NewListDashboardVersionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDashboardVersionsPaginator(req ListDashboardVersionsRequest) ListDashboardVersionsPaginator { + return ListDashboardVersionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDashboardVersionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDashboardVersionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDashboardVersionsPaginator struct { + aws.Pager +} + +func (p *ListDashboardVersionsPaginator) CurrentPage() *ListDashboardVersionsOutput { + return p.Pager.CurrentPage().(*ListDashboardVersionsOutput) +} + +// ListDashboardVersionsResponse is the response type for the +// ListDashboardVersions API operation. +type ListDashboardVersionsResponse struct { + *ListDashboardVersionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDashboardVersions request. 
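The ListDashboardVersions operation above ships with a generated paginator. Note that the doc comment refers to NewListDashboardVersionsRequestPaginator, while the constructor actually defined in this file is NewListDashboardVersionsPaginator; the sketch below uses the defined name. The account and dashboard IDs are placeholders, the package name is illustrative, and an already-constructed *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// countDashboardVersions pages through every version of a single dashboard
// and returns how many version summaries were seen in total.
func countDashboardVersions(ctx context.Context, client *quicksight.Client) (int, error) {
	req := client.ListDashboardVersionsRequest(&quicksight.ListDashboardVersionsInput{
		AwsAccountId: aws.String("111122223333"),                         // placeholder account ID
		DashboardId:  aws.String("1a1ac2b2-3fc3-4b44-5e5d-c6db6778df89"), // placeholder dashboard ID
		MaxResults:   aws.Int64(10),
	})

	// Constructor as defined in this file (not the name used in the doc comment).
	p := quicksight.NewListDashboardVersionsPaginator(req)

	total := 0
	for p.Next(ctx) {
		total += len(p.CurrentPage().DashboardVersionSummaryList)
	}
	return total, p.Err()
}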
+func (r *ListDashboardVersionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListDashboards.go b/service/quicksight/api_op_ListDashboards.go new file mode 100644 index 00000000000..94fbf585a25 --- /dev/null +++ b/service/quicksight/api_op_ListDashboards.go @@ -0,0 +1,254 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDashboardsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboards you are listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListDashboardsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDashboardsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDashboardsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDashboardsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDashboardsOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains all of the dashboards shared with the user. Provides + // basic information about the dashboards. + DashboardSummaryList []DashboardSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListDashboardsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDashboardsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DashboardSummaryList != nil { + v := s.DashboardSummaryList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DashboardSummaryList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListDashboards = "ListDashboards" + +// ListDashboardsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists dashboards in the AWS account. +// +// CLI syntax: +// +// aws quicksight list-dashboards --aws-account-id 111122223333 --max-results +// 5 —next-token 'next-10' +// +// // Example sending a request using ListDashboardsRequest. +// req := client.ListDashboardsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboards +func (c *Client) ListDashboardsRequest(input *ListDashboardsInput) ListDashboardsRequest { + op := &aws.Operation{ + Name: opListDashboards, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDashboardsInput{} + } + + req := c.newRequest(op, input, &ListDashboardsOutput{}) + return ListDashboardsRequest{Request: req, Input: input, Copy: c.ListDashboardsRequest} +} + +// ListDashboardsRequest is the request type for the +// ListDashboards API operation. +type ListDashboardsRequest struct { + *aws.Request + Input *ListDashboardsInput + Copy func(*ListDashboardsInput) ListDashboardsRequest +} + +// Send marshals and sends the ListDashboards API request. +func (r ListDashboardsRequest) Send(ctx context.Context) (*ListDashboardsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDashboardsResponse{ + ListDashboardsOutput: r.Request.Data.(*ListDashboardsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDashboardsRequestPaginator returns a paginator for ListDashboards. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListDashboardsRequest(input) +// p := quicksight.NewListDashboardsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDashboardsPaginator(req ListDashboardsRequest) ListDashboardsPaginator { + return ListDashboardsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDashboardsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDashboardsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDashboardsPaginator struct { + aws.Pager +} + +func (p *ListDashboardsPaginator) CurrentPage() *ListDashboardsOutput { + return p.Pager.CurrentPage().(*ListDashboardsOutput) +} + +// ListDashboardsResponse is the response type for the +// ListDashboards API operation. +type ListDashboardsResponse struct { + *ListDashboardsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDashboards request. +func (r *ListDashboardsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListDataSets.go b/service/quicksight/api_op_ListDataSets.go new file mode 100644 index 00000000000..261f3914400 --- /dev/null +++ b/service/quicksight/api_op_ListDataSets.go @@ -0,0 +1,252 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDataSetsInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListDataSetsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataSetsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDataSetsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
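Stepping back to the ListDashboards operation completed just above, the following sketch drives its paginator to completion and reports a dashboard count. It assumes an existing *quicksight.Client; the account ID and the illustrative package name are placeholders.

package quicksightexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// printDashboardCount walks every page of ListDashboards for one account and
// prints how many dashboard summaries were returned.
func printDashboardCount(ctx context.Context, client *quicksight.Client) error {
	req := client.ListDashboardsRequest(&quicksight.ListDashboardsInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		MaxResults:   aws.Int64(100),
	})

	p := quicksight.NewListDashboardsPaginator(req)

	count := 0
	for p.Next(ctx) {
		count += len(p.CurrentPage().DashboardSummaryList)
	}
	if err := p.Err(); err != nil {
		return err
	}
	fmt.Printf("account has %d dashboards\n", count)
	return nil
}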
+func (s ListDataSetsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDataSetsOutput struct { + _ struct{} `type:"structure"` + + // The list of dataset summaries. + DataSetSummaries []DataSetSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListDataSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDataSetsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSetSummaries != nil { + v := s.DataSetSummaries + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSetSummaries", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListDataSets = "ListDataSets" + +// ListDataSetsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists all of the datasets belonging to this account in an AWS region. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/* +// +// CLI syntax: aws quicksight list-data-sets --aws-account-id=111111111111 +// +// // Example sending a request using ListDataSetsRequest. 
+// req := client.ListDataSetsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSets +func (c *Client) ListDataSetsRequest(input *ListDataSetsInput) ListDataSetsRequest { + op := &aws.Operation{ + Name: opListDataSets, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataSetsInput{} + } + + req := c.newRequest(op, input, &ListDataSetsOutput{}) + return ListDataSetsRequest{Request: req, Input: input, Copy: c.ListDataSetsRequest} +} + +// ListDataSetsRequest is the request type for the +// ListDataSets API operation. +type ListDataSetsRequest struct { + *aws.Request + Input *ListDataSetsInput + Copy func(*ListDataSetsInput) ListDataSetsRequest +} + +// Send marshals and sends the ListDataSets API request. +func (r ListDataSetsRequest) Send(ctx context.Context) (*ListDataSetsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDataSetsResponse{ + ListDataSetsOutput: r.Request.Data.(*ListDataSetsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDataSetsRequestPaginator returns a paginator for ListDataSets. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDataSetsRequest(input) +// p := quicksight.NewListDataSetsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDataSetsPaginator(req ListDataSetsRequest) ListDataSetsPaginator { + return ListDataSetsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDataSetsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDataSetsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDataSetsPaginator struct { + aws.Pager +} + +func (p *ListDataSetsPaginator) CurrentPage() *ListDataSetsOutput { + return p.Pager.CurrentPage().(*ListDataSetsOutput) +} + +// ListDataSetsResponse is the response type for the +// ListDataSets API operation. +type ListDataSetsResponse struct { + *ListDataSetsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDataSets request. +func (r *ListDataSetsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListDataSources.go b/service/quicksight/api_op_ListDataSources.go new file mode 100644 index 00000000000..41d624c0f82 --- /dev/null +++ b/service/quicksight/api_op_ListDataSources.go @@ -0,0 +1,252 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
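For the ListDataSets operation added above, a single Send call is often enough; the response's NextToken signals whether further pages exist. The sketch below takes that simpler route (the paginator shown above remains the easier option for exhaustive listing). The account ID is a placeholder and an existing *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// firstDataSetPage fetches a single page of dataset summaries and reports
// whether more pages are available via NextToken.
func firstDataSetPage(ctx context.Context, client *quicksight.Client) error {
	req := client.ListDataSetsRequest(&quicksight.ListDataSetsInput{
		AwsAccountId: aws.String("111111111111"), // placeholder account ID
		MaxResults:   aws.Int64(25),
	})

	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}

	fmt.Printf("received %d dataset summaries\n", len(resp.DataSetSummaries))
	if resp.NextToken != nil {
		fmt.Println("more results available; pass NextToken (or use the paginator) to continue")
	}
	return nil
}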
+ +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListDataSourcesInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListDataSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataSourcesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDataSourcesInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDataSourcesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListDataSourcesOutput struct { + _ struct{} `type:"structure"` + + // A list of data sources. + DataSources []DataSource `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListDataSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListDataSourcesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSources != nil { + v := s.DataSources + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSources", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListDataSources = "ListDataSources" + +// ListDataSourcesRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists data sources in current AWS region that belong to this AWS account. +// +// The permissions resource is: arn:aws:quicksight:region:aws-account-id:datasource/* +// +// CLI syntax: aws quicksight list-data-sources --aws-account-id=111122223333 +// +// // Example sending a request using ListDataSourcesRequest. +// req := client.ListDataSourcesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSources +func (c *Client) ListDataSourcesRequest(input *ListDataSourcesInput) ListDataSourcesRequest { + op := &aws.Operation{ + Name: opListDataSources, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sources", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataSourcesInput{} + } + + req := c.newRequest(op, input, &ListDataSourcesOutput{}) + return ListDataSourcesRequest{Request: req, Input: input, Copy: c.ListDataSourcesRequest} +} + +// ListDataSourcesRequest is the request type for the +// ListDataSources API operation. +type ListDataSourcesRequest struct { + *aws.Request + Input *ListDataSourcesInput + Copy func(*ListDataSourcesInput) ListDataSourcesRequest +} + +// Send marshals and sends the ListDataSources API request. +func (r ListDataSourcesRequest) Send(ctx context.Context) (*ListDataSourcesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDataSourcesResponse{ + ListDataSourcesOutput: r.Request.Data.(*ListDataSourcesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDataSourcesRequestPaginator returns a paginator for ListDataSources. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListDataSourcesRequest(input) +// p := quicksight.NewListDataSourcesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDataSourcesPaginator(req ListDataSourcesRequest) ListDataSourcesPaginator { + return ListDataSourcesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDataSourcesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDataSourcesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDataSourcesPaginator struct { + aws.Pager +} + +func (p *ListDataSourcesPaginator) CurrentPage() *ListDataSourcesOutput { + return p.Pager.CurrentPage().(*ListDataSourcesOutput) +} + +// ListDataSourcesResponse is the response type for the +// ListDataSources API operation. +type ListDataSourcesResponse struct { + *ListDataSourcesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDataSources request. +func (r *ListDataSourcesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListIAMPolicyAssignments.go b/service/quicksight/api_op_ListIAMPolicyAssignments.go new file mode 100644 index 00000000000..b65f1e74e35 --- /dev/null +++ b/service/quicksight/api_op_ListIAMPolicyAssignments.go @@ -0,0 +1,225 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListIAMPolicyAssignmentsInput struct { + _ struct{} `type:"structure"` + + // The status of the assignment. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` + + // The AWS account ID that contains this IAM policy assignment. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace for this assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListIAMPolicyAssignmentsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
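Relating to the ListDataSources operation completed just above, this sketch collects every data source in the current region by exhausting the generated paginator. Placeholder IDs again; an existing *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// collectDataSources gathers every data source visible to the account in the
// current region by driving the generated paginator to completion.
func collectDataSources(ctx context.Context, client *quicksight.Client) ([]quicksight.DataSource, error) {
	req := client.ListDataSourcesRequest(&quicksight.ListDataSourcesInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		MaxResults:   aws.Int64(50),
	})

	p := quicksight.NewListDataSourcesPaginator(req)

	var sources []quicksight.DataSource
	for p.Next(ctx) {
		sources = append(sources, p.CurrentPage().DataSources...)
	}
	return sources, p.Err()
}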
+func (s *ListIAMPolicyAssignmentsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListIAMPolicyAssignmentsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListIAMPolicyAssignmentsOutput struct { + _ struct{} `type:"structure"` + + // Information describing the IAM policy assignments. + IAMPolicyAssignments []IAMPolicyAssignmentSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListIAMPolicyAssignmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListIAMPolicyAssignmentsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.IAMPolicyAssignments != nil { + v := s.IAMPolicyAssignments + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "IAMPolicyAssignments", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. 
Status + return nil +} + +const opListIAMPolicyAssignments = "ListIAMPolicyAssignments" + +// ListIAMPolicyAssignmentsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists assignments in current QuickSight account. +// +// CLI syntax: +// +// aws quicksight list-iam-policy-assignments --aws-account-id=111122223333 +// --max-result=5 --assignment-status=ENABLED --namespace=default --region=us-east-1 +// --next-token=3 +// +// // Example sending a request using ListIAMPolicyAssignmentsRequest. +// req := client.ListIAMPolicyAssignmentsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignments +func (c *Client) ListIAMPolicyAssignmentsRequest(input *ListIAMPolicyAssignmentsInput) ListIAMPolicyAssignmentsRequest { + op := &aws.Operation{ + Name: opListIAMPolicyAssignments, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments", + } + + if input == nil { + input = &ListIAMPolicyAssignmentsInput{} + } + + req := c.newRequest(op, input, &ListIAMPolicyAssignmentsOutput{}) + return ListIAMPolicyAssignmentsRequest{Request: req, Input: input, Copy: c.ListIAMPolicyAssignmentsRequest} +} + +// ListIAMPolicyAssignmentsRequest is the request type for the +// ListIAMPolicyAssignments API operation. +type ListIAMPolicyAssignmentsRequest struct { + *aws.Request + Input *ListIAMPolicyAssignmentsInput + Copy func(*ListIAMPolicyAssignmentsInput) ListIAMPolicyAssignmentsRequest +} + +// Send marshals and sends the ListIAMPolicyAssignments API request. +func (r ListIAMPolicyAssignmentsRequest) Send(ctx context.Context) (*ListIAMPolicyAssignmentsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListIAMPolicyAssignmentsResponse{ + ListIAMPolicyAssignmentsOutput: r.Request.Data.(*ListIAMPolicyAssignmentsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListIAMPolicyAssignmentsResponse is the response type for the +// ListIAMPolicyAssignments API operation. +type ListIAMPolicyAssignmentsResponse struct { + *ListIAMPolicyAssignmentsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListIAMPolicyAssignments request. +func (r *ListIAMPolicyAssignmentsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListIAMPolicyAssignmentsForUser.go b/service/quicksight/api_op_ListIAMPolicyAssignmentsForUser.go new file mode 100644 index 00000000000..eebde1ab637 --- /dev/null +++ b/service/quicksight/api_op_ListIAMPolicyAssignmentsForUser.go @@ -0,0 +1,234 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListIAMPolicyAssignmentsForUserInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID that contains the assignment. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. 
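Unlike the list operations above, the ListIAMPolicyAssignments operation just completed does not get a generated paginator, so callers loop on NextToken by hand. A minimal sketch of that loop follows; the account ID and namespace are placeholders and an existing *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// listAllIAMPolicyAssignments collects every assignment summary in a namespace.
// No paginator is generated for this operation, so the NextToken returned in
// each response is fed back into the next request manually.
func listAllIAMPolicyAssignments(ctx context.Context, client *quicksight.Client) ([]quicksight.IAMPolicyAssignmentSummary, error) {
	var all []quicksight.IAMPolicyAssignmentSummary
	var next *string
	for {
		req := client.ListIAMPolicyAssignmentsRequest(&quicksight.ListIAMPolicyAssignmentsInput{
			AwsAccountId: aws.String("111122223333"), // placeholder account ID
			Namespace:    aws.String("default"),
			MaxResults:   aws.Int64(50),
			NextToken:    next,
		})

		resp, err := req.Send(ctx)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.IAMPolicyAssignments...)

		if resp.NextToken == nil {
			return all, nil
		}
		next = resp.NextToken
	}
}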
+ MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace of the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The name of the user. + // + // UserName is a required field + UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListIAMPolicyAssignmentsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIAMPolicyAssignmentsForUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsForUserInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListIAMPolicyAssignmentsForUserInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.UserName != nil { + v := *s.UserName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "UserName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListIAMPolicyAssignmentsForUserOutput struct { + _ struct{} `type:"structure"` + + // Active assignments for this user. + ActiveAssignments []ActiveIAMPolicyAssignment `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListIAMPolicyAssignmentsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListIAMPolicyAssignmentsForUserOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.ActiveAssignments != nil { + v := s.ActiveAssignments + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ActiveAssignments", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListIAMPolicyAssignmentsForUser = "ListIAMPolicyAssignmentsForUser" + +// ListIAMPolicyAssignmentsForUserRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists all the assignments and the ARNs for the associated IAM policies assigned +// to the specified user and the group or groups that the user belongs to. +// +// CLI syntax: +// +// aws quicksight list-iam-policy-assignments-for-user --aws-account-id=111122223333 +// --user-name=user5 --namespace=default --max-result=6 --region=us-east-1 +// +// // Example sending a request using ListIAMPolicyAssignmentsForUserRequest. +// req := client.ListIAMPolicyAssignmentsForUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignmentsForUser +func (c *Client) ListIAMPolicyAssignmentsForUserRequest(input *ListIAMPolicyAssignmentsForUserInput) ListIAMPolicyAssignmentsForUserRequest { + op := &aws.Operation{ + Name: opListIAMPolicyAssignmentsForUser, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments", + } + + if input == nil { + input = &ListIAMPolicyAssignmentsForUserInput{} + } + + req := c.newRequest(op, input, &ListIAMPolicyAssignmentsForUserOutput{}) + return ListIAMPolicyAssignmentsForUserRequest{Request: req, Input: input, Copy: c.ListIAMPolicyAssignmentsForUserRequest} +} + +// ListIAMPolicyAssignmentsForUserRequest is the request type for the +// ListIAMPolicyAssignmentsForUser API operation. +type ListIAMPolicyAssignmentsForUserRequest struct { + *aws.Request + Input *ListIAMPolicyAssignmentsForUserInput + Copy func(*ListIAMPolicyAssignmentsForUserInput) ListIAMPolicyAssignmentsForUserRequest +} + +// Send marshals and sends the ListIAMPolicyAssignmentsForUser API request. 
+func (r ListIAMPolicyAssignmentsForUserRequest) Send(ctx context.Context) (*ListIAMPolicyAssignmentsForUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListIAMPolicyAssignmentsForUserResponse{ + ListIAMPolicyAssignmentsForUserOutput: r.Request.Data.(*ListIAMPolicyAssignmentsForUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListIAMPolicyAssignmentsForUserResponse is the response type for the +// ListIAMPolicyAssignmentsForUser API operation. +type ListIAMPolicyAssignmentsForUserResponse struct { + *ListIAMPolicyAssignmentsForUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListIAMPolicyAssignmentsForUser request. +func (r *ListIAMPolicyAssignmentsForUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListIngestions.go b/service/quicksight/api_op_ListIngestions.go new file mode 100644 index 00000000000..2f5889673df --- /dev/null +++ b/service/quicksight/api_op_ListIngestions.go @@ -0,0 +1,263 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListIngestionsInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListIngestionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIngestionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListIngestionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
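For the per-user variant completed just above, a single request is usually sufficient. The sketch below reports how many assignments are active for one user; the account ID, namespace, and user name mirror the placeholders used in the CLI example and an existing *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// printActiveAssignmentsForUser reports how many IAM policy assignments are
// currently active for a single QuickSight user.
func printActiveAssignmentsForUser(ctx context.Context, client *quicksight.Client) error {
	req := client.ListIAMPolicyAssignmentsForUserRequest(&quicksight.ListIAMPolicyAssignmentsForUserInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		Namespace:    aws.String("default"),
		UserName:     aws.String("user5"), // placeholder user name
	})

	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("user has %d active IAM policy assignments\n", len(resp.ActiveAssignments))
	return nil
}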
+func (s ListIngestionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListIngestionsOutput struct { + _ struct{} `type:"structure"` + + // A list of the ingestions. + Ingestions []Ingestion `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s ListIngestionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListIngestionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Ingestions != nil { + v := s.Ingestions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Ingestions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListIngestions = "ListIngestions" + +// ListIngestionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists the history of SPICE ingestions for a dataset. +// +// // Example sending a request using ListIngestionsRequest. 
+// req := client.ListIngestionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIngestions +func (c *Client) ListIngestionsRequest(input *ListIngestionsInput) ListIngestionsRequest { + op := &aws.Operation{ + Name: opListIngestions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListIngestionsInput{} + } + + req := c.newRequest(op, input, &ListIngestionsOutput{}) + return ListIngestionsRequest{Request: req, Input: input, Copy: c.ListIngestionsRequest} +} + +// ListIngestionsRequest is the request type for the +// ListIngestions API operation. +type ListIngestionsRequest struct { + *aws.Request + Input *ListIngestionsInput + Copy func(*ListIngestionsInput) ListIngestionsRequest +} + +// Send marshals and sends the ListIngestions API request. +func (r ListIngestionsRequest) Send(ctx context.Context) (*ListIngestionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListIngestionsResponse{ + ListIngestionsOutput: r.Request.Data.(*ListIngestionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListIngestionsRequestPaginator returns a paginator for ListIngestions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListIngestionsRequest(input) +// p := quicksight.NewListIngestionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListIngestionsPaginator(req ListIngestionsRequest) ListIngestionsPaginator { + return ListIngestionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListIngestionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListIngestionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListIngestionsPaginator struct { + aws.Pager +} + +func (p *ListIngestionsPaginator) CurrentPage() *ListIngestionsOutput { + return p.Pager.CurrentPage().(*ListIngestionsOutput) +} + +// ListIngestionsResponse is the response type for the +// ListIngestions API operation. +type ListIngestionsResponse struct { + *ListIngestionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListIngestions request. +func (r *ListIngestionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListTagsForResource.go b/service/quicksight/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..d239c07eb31 --- /dev/null +++ b/service/quicksight/api_op_ListTagsForResource.go @@ -0,0 +1,168 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
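Relating to the ListIngestions operation completed just above, this sketch retrieves the full SPICE ingestion history for one dataset via the generated paginator. The account and dataset IDs are placeholders and an existing *quicksight.Client is assumed.

package quicksightexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// collectIngestions returns the SPICE ingestion history recorded for one dataset.
func collectIngestions(ctx context.Context, client *quicksight.Client) ([]quicksight.Ingestion, error) {
	req := client.ListIngestionsRequest(&quicksight.ListIngestionsInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		DataSetId:    aws.String("dataset1"),     // placeholder dataset ID
		MaxResults:   aws.Int64(25),
	})

	p := quicksight.NewListIngestionsPaginator(req)

	var ingestions []quicksight.Ingestion
	for p.Next(ctx) {
		ingestions = append(ingestions, p.CurrentPage().Ingestions...)
	}
	return ingestions, p.Err()
}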
+ +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource you want a list of tags for. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. + Tags []Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists the tags assigned to a resource. +// +// CLI syntax: +// +// * aws quicksight list-tags-for-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 +// --region us-east-1 +// +// // Example sending a request using ListTagsForResourceRequest. 
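As a Go counterpart to the list-tags-for-resource CLI sample above, the sketch below sends the request and pretty-prints the response. It assumes the same package, imports, and client set-up as the ListIngestions sketch earlier in this diff; the dataset ARN is the placeholder from the CLI example.

// listResourceTags mirrors the list-tags-for-resource CLI sample above.
func listResourceTags(ctx context.Context, client *quicksight.Client) error {
	req := client.ListTagsForResourceRequest(&quicksight.ListTagsForResourceInput{
		// Placeholder ARN taken from the CLI example in the comment above.
		ResourceArn: aws.String("arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1"),
	})
	resp, err := req.Send(ctx)
	if err != nil {
		return err
	}
	// String() pretty-prints the output shape, including the Tags list.
	fmt.Println(resp.String())
	return nil
}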
+// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/resources/{ResourceArn}/tags", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. +func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. +func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListTemplateAliases.go b/service/quicksight/api_op_ListTemplateAliases.go new file mode 100644 index 00000000000..8661073a195 --- /dev/null +++ b/service/quicksight/api_op_ListTemplateAliases.go @@ -0,0 +1,271 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTemplateAliasesInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the template aliases you are listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTemplateAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTemplateAliasesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTemplateAliasesInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTemplateAliasesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-result", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTemplateAliasesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing the list of template aliases. + TemplateAliasList []TemplateAlias `type:"list"` +} + +// String returns the string representation +func (s ListTemplateAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTemplateAliasesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateAliasList != nil { + v := s.TemplateAliasList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "TemplateAliasList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListTemplateAliases = "ListTemplateAliases" + +// ListTemplateAliasesRequest returns a request value for making API operation for +// Amazon QuickSight. 
+// +// Lists all the aliases of a template. +// +// CLI syntax: +// +// aws quicksight list-template-aliases --aws-account-id 111122223333 —template-id +// 'reports_test_template' +// +// // Example sending a request using ListTemplateAliasesRequest. +// req := client.ListTemplateAliasesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateAliases +func (c *Client) ListTemplateAliasesRequest(input *ListTemplateAliasesInput) ListTemplateAliasesRequest { + op := &aws.Operation{ + Name: opListTemplateAliases, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTemplateAliasesInput{} + } + + req := c.newRequest(op, input, &ListTemplateAliasesOutput{}) + return ListTemplateAliasesRequest{Request: req, Input: input, Copy: c.ListTemplateAliasesRequest} +} + +// ListTemplateAliasesRequest is the request type for the +// ListTemplateAliases API operation. +type ListTemplateAliasesRequest struct { + *aws.Request + Input *ListTemplateAliasesInput + Copy func(*ListTemplateAliasesInput) ListTemplateAliasesRequest +} + +// Send marshals and sends the ListTemplateAliases API request. +func (r ListTemplateAliasesRequest) Send(ctx context.Context) (*ListTemplateAliasesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTemplateAliasesResponse{ + ListTemplateAliasesOutput: r.Request.Data.(*ListTemplateAliasesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTemplateAliasesRequestPaginator returns a paginator for ListTemplateAliases. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTemplateAliasesRequest(input) +// p := quicksight.NewListTemplateAliasesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTemplateAliasesPaginator(req ListTemplateAliasesRequest) ListTemplateAliasesPaginator { + return ListTemplateAliasesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTemplateAliasesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTemplateAliasesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTemplateAliasesPaginator struct { + aws.Pager +} + +func (p *ListTemplateAliasesPaginator) CurrentPage() *ListTemplateAliasesOutput { + return p.Pager.CurrentPage().(*ListTemplateAliasesOutput) +} + +// ListTemplateAliasesResponse is the response type for the +// ListTemplateAliases API operation. 
+type ListTemplateAliasesResponse struct { + *ListTemplateAliasesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTemplateAliases request. +func (r *ListTemplateAliasesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListTemplateVersions.go b/service/quicksight/api_op_ListTemplateVersions.go new file mode 100644 index 00000000000..19e08bfac3f --- /dev/null +++ b/service/quicksight/api_op_ListTemplateVersions.go @@ -0,0 +1,271 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTemplateVersionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the templates you are listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTemplateVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTemplateVersionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTemplateVersionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListTemplateVersionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-results", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTemplateVersionsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing a list of all the versions of the specified template. + TemplateVersionSummaryList []TemplateVersionSummary `type:"list"` +} + +// String returns the string representation +func (s ListTemplateVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTemplateVersionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateVersionSummaryList != nil { + v := s.TemplateVersionSummaryList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "TemplateVersionSummaryList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListTemplateVersions = "ListTemplateVersions" + +// ListTemplateVersionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists all the versions of the templates in the Quicksight account. +// +// CLI syntax: +// +// aws quicksight list-template-versions --aws-account-id 111122223333 --aws-account-id +// 196359894473 --template-id reports-test-template +// +// // Example sending a request using ListTemplateVersionsRequest. 
+// req := client.ListTemplateVersionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateVersions +func (c *Client) ListTemplateVersionsRequest(input *ListTemplateVersionsInput) ListTemplateVersionsRequest { + op := &aws.Operation{ + Name: opListTemplateVersions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/versions", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTemplateVersionsInput{} + } + + req := c.newRequest(op, input, &ListTemplateVersionsOutput{}) + return ListTemplateVersionsRequest{Request: req, Input: input, Copy: c.ListTemplateVersionsRequest} +} + +// ListTemplateVersionsRequest is the request type for the +// ListTemplateVersions API operation. +type ListTemplateVersionsRequest struct { + *aws.Request + Input *ListTemplateVersionsInput + Copy func(*ListTemplateVersionsInput) ListTemplateVersionsRequest +} + +// Send marshals and sends the ListTemplateVersions API request. +func (r ListTemplateVersionsRequest) Send(ctx context.Context) (*ListTemplateVersionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTemplateVersionsResponse{ + ListTemplateVersionsOutput: r.Request.Data.(*ListTemplateVersionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTemplateVersionsRequestPaginator returns a paginator for ListTemplateVersions. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListTemplateVersionsRequest(input) +// p := quicksight.NewListTemplateVersionsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTemplateVersionsPaginator(req ListTemplateVersionsRequest) ListTemplateVersionsPaginator { + return ListTemplateVersionsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTemplateVersionsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTemplateVersionsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTemplateVersionsPaginator struct { + aws.Pager +} + +func (p *ListTemplateVersionsPaginator) CurrentPage() *ListTemplateVersionsOutput { + return p.Pager.CurrentPage().(*ListTemplateVersionsOutput) +} + +// ListTemplateVersionsResponse is the response type for the +// ListTemplateVersions API operation. +type ListTemplateVersionsResponse struct { + *ListTemplateVersionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTemplateVersions request. 
+func (r *ListTemplateVersionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListTemplates.go b/service/quicksight/api_op_ListTemplates.go new file mode 100644 index 00000000000..5bd66f09241 --- /dev/null +++ b/service/quicksight/api_op_ListTemplates.go @@ -0,0 +1,253 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTemplatesInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the templates you are listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +} + +// String returns the string representation +func (s ListTemplatesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTemplatesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTemplatesInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTemplatesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MaxResults != nil { + v := *s.MaxResults + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "max-result", protocol.Int64Value(v), metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "next-token", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTemplatesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing information about the templates in the list. + TemplateSummaryList []TemplateSummary `type:"list"` +} + +// String returns the string representation +func (s ListTemplatesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListTemplatesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateSummaryList != nil { + v := s.TemplateSummaryList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "TemplateSummaryList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opListTemplates = "ListTemplates" + +// ListTemplatesRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Lists all the templates in the QuickSight account. +// +// CLI syntax: +// +// aws quicksight list-templates --aws-account-id 111122223333 --max-results +// 1 —next-token AYADeJuxwOypAndSoOn +// +// // Example sending a request using ListTemplatesRequest. +// req := client.ListTemplatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplates +func (c *Client) ListTemplatesRequest(input *ListTemplatesInput) ListTemplatesRequest { + op := &aws.Operation{ + Name: opListTemplates, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTemplatesInput{} + } + + req := c.newRequest(op, input, &ListTemplatesOutput{}) + return ListTemplatesRequest{Request: req, Input: input, Copy: c.ListTemplatesRequest} +} + +// ListTemplatesRequest is the request type for the +// ListTemplates API operation. +type ListTemplatesRequest struct { + *aws.Request + Input *ListTemplatesInput + Copy func(*ListTemplatesInput) ListTemplatesRequest +} + +// Send marshals and sends the ListTemplates API request. +func (r ListTemplatesRequest) Send(ctx context.Context) (*ListTemplatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTemplatesResponse{ + ListTemplatesOutput: r.Request.Data.(*ListTemplatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListTemplatesRequestPaginator returns a paginator for ListTemplates. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListTemplatesRequest(input) +// p := quicksight.NewListTemplatesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListTemplatesPaginator(req ListTemplatesRequest) ListTemplatesPaginator { + return ListTemplatesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListTemplatesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListTemplatesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListTemplatesPaginator struct { + aws.Pager +} + +func (p *ListTemplatesPaginator) CurrentPage() *ListTemplatesOutput { + return p.Pager.CurrentPage().(*ListTemplatesOutput) +} + +// ListTemplatesResponse is the response type for the +// ListTemplates API operation. +type ListTemplatesResponse struct { + *ListTemplatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTemplates request. +func (r *ListTemplatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_ListUserGroups.go b/service/quicksight/api_op_ListUserGroups.go index aa4f78f8d6a..ac623576910 100644 --- a/service/quicksight/api_op_ListUserGroups.go +++ b/service/quicksight/api_op_ListUserGroups.go @@ -168,8 +168,6 @@ const opListUserGroups = "ListUserGroups" // Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member // of. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// // The response is a one or more group objects. // // CLI Sample: diff --git a/service/quicksight/api_op_ListUsers.go b/service/quicksight/api_op_ListUsers.go index 8111ead7828..758f62a28db 100644 --- a/service/quicksight/api_op_ListUsers.go +++ b/service/quicksight/api_op_ListUsers.go @@ -149,8 +149,6 @@ const opListUsers = "ListUsers" // // Returns a list of all of the Amazon QuickSight users belonging to this account. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/* . -// // The response is a list of user objects, containing each user's Amazon Resource // Name (ARN), AWS Identity and Access Management (IAM) role, and email address. // diff --git a/service/quicksight/api_op_RegisterUser.go b/service/quicksight/api_op_RegisterUser.go index 44b5f2adc89..cba51184f2e 100644 --- a/service/quicksight/api_op_RegisterUser.go +++ b/service/quicksight/api_op_RegisterUser.go @@ -48,7 +48,7 @@ type RegisterUserInput struct { // scenarios, for example when you are registering an IAM user or an Amazon // QuickSight user. You can register multiple users using the same IAM role // if each user has a different session name. For more information on assuming - // IAM roles, see assume-role (https://docs.aws.amazon.com/cli/latest/reference/sts/assume-role.html) + // IAM roles, see assume-role (https://docs.aws.example.com/cli/latest/reference/sts/assume-role.html) // in the AWS CLI Reference. SessionName *string `min:"2" type:"string"` @@ -56,16 +56,21 @@ type RegisterUserInput struct { // are registering. UserName *string `min:"1" type:"string"` - // The Amazon QuickSight role of the user. The user role can be one of the following: + // The Amazon QuickSight role for the user. 
The user role can be one of the + // following: // // * READER: A user who has read-only access to dashboards. // - // * AUTHOR: A user who can create data sources, data sets, analyses, and + // * AUTHOR: A user who can create data sources, datasets, analyses, and // dashboards. // // * ADMIN: A user who is an author, who can also manage Amazon QuickSight // settings. // + // * RESTRICTED_READER: This role isn't currently available for use. + // + // * RESTRICTED_AUTHOR: This role isn't currently available for use. + // // UserRole is a required field UserRole UserRole `type:"string" required:"true" enum:"true"` } @@ -222,13 +227,6 @@ const opRegisterUser = "RegisterUser" // AWS Identity and Access Management (IAM) identity or role specified in the // request. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// -// The condition resource is the Amazon Resource Name (ARN) for the IAM user -// or role, and the session name. -// -// The condition keys are quicksight:IamArn and quicksight:SessionName. -// // CLI Sample: // // aws quicksight register-user -\-aws-account-id=111122223333 -\-namespace=default diff --git a/service/quicksight/api_op_TagResource.go b/service/quicksight/api_op_TagResource.go new file mode 100644 index 00000000000..88ed4e7090d --- /dev/null +++ b/service/quicksight/api_op_TagResource.go @@ -0,0 +1,207 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource you want to tag. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. + // + // Tags is a required field + Tags []Tag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Assigns a tag or tags to a resource. +// +// Assigns one or more tags (key-value pairs) to the specified QuickSight resource. +// Tags can help you organize and categorize your resources. You can also use +// them to scope user permissions, by granting a user permission to access or +// change only resources with certain tag values. You can use the TagResource +// action with a resource that already has tags. If you specify a new tag key +// for the resource, this tag is appended to the list of tags associated with +// the resource. If you specify a tag key that is already associated with the +// resource, the new tag value that you specify replaces the previous value +// for that tag. +// +// You can associate as many as 50 tags with a resource. QuickSight supports +// tagging on data-set, data-source, dashboard, template. +// +// Tagging for QuickSight works in a similar was to tagging for other AWS services, +// except for the following: +// +// * You can't use tags to track AWS costs for QuickSight, because QuickSight +// costs are based on users and SPICE capacity, which aren't taggable resources. +// +// * QuickSight doesn't currently support the Tag Editor for AWS Resource +// Groups. +// +// CLI syntax to tag a resource: +// +// * aws quicksight tag-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 +// --tags Key=K1,Value=V1 Key=K2,Value=V2 --region us-east-1 +// +// // Example sending a request using TagResourceRequest. 
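The tagging behavior described above (up to 50 tags; new keys append, existing keys are overwritten) maps to a simple request shape. The sketch below reproduces the tag-resource CLI sample in Go; it assumes the Tag shape exposes Key and Value string members (its definition is outside this hunk) and reuses the client set-up from the earlier ListIngestions sketch. The ARN and tag values are placeholders.

// tagDataSet applies the two example tags from the CLI sample above to a data set.
func tagDataSet(ctx context.Context, client *quicksight.Client) error {
	req := client.TagResourceRequest(&quicksight.TagResourceInput{
		ResourceArn: aws.String("arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1"),
		Tags: []quicksight.Tag{
			// Key/Value members are assumed here; see the Tag shape for the exact definition.
			{Key: aws.String("K1"), Value: aws.String("V1")},
			{Key: aws.String("K2"), Value: aws.String("V2")},
		},
	})
	if _, err := req.Send(ctx); err != nil {
		return err
	}
	return nil
}

UntagResource, later in this diff, is the inverse operation: the same ResourceArn plus the tag keys to remove (for example, TagKeys: []string{"K1", "K2"}).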
+// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/resources/{ResourceArn}/tags", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. +func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. +func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UntagResource.go b/service/quicksight/api_op_UntagResource.go new file mode 100644 index 00000000000..68e36818a94 --- /dev/null +++ b/service/quicksight/api_op_UntagResource.go @@ -0,0 +1,177 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource you to untag. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + + // The keys of the key-value pairs for the resource tag or tags assigned to + // the resource. + // + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"keys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "keys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Removes a tag or tags from a resource. +// +// CLI syntax: +// +// * aws quicksight untag-resource --resource-arn arn:aws:quicksight:us-east-1:111111111111:dataset/dataset1 +// --tag-keys K1 K2 --region us-east-1 +// +// // Example sending a request using UntagResourceRequest. +// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/resources/{ResourceArn}/tags", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. 
+func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDashboard.go b/service/quicksight/api_op_UpdateDashboard.go new file mode 100644 index 00000000000..44950157204 --- /dev/null +++ b/service/quicksight/api_op_UpdateDashboard.go @@ -0,0 +1,307 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDashboardInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboard you are updating. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // Publishing options when creating a dashboard. + // + // * AvailabilityStatus for AdHocFilteringOption - This can be either ENABLED + // or DISABLED. When This is set to set to DISABLED, QuickSight disables + // the left filter pane on the published dashboard, which can be used for + // AdHoc filtering. Enabled by default. + // + // * AvailabilityStatus for ExportToCSVOption - This can be either ENABLED + // or DISABLED. The visual option to export data to CSV is disabled when + // this is set to DISABLED. Enabled by default. + // + // * VisibilityState for SheetControlsOption - This can be either COLLAPSED + // or EXPANDED. The sheet controls pane is collapsed by default when set + // to true. Collapsed by default. + DashboardPublishOptions *DashboardPublishOptions `type:"structure"` + + // The display name of the dashboard. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A structure that contains the parameters of the dashboard. + Parameters *Parameters `type:"structure"` + + // The template or analysis from which the dashboard is created. The SouceTemplate + // entity accepts the Arn of the template and also references to replacement + // datasets for the placeholders set when creating the template. The replacement + // datasets need to follow the same schema as the datasets for which placeholders + // were created when creating the template. + // + // SourceEntity is a required field + SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + + // A description for the first version of the dashboard being created. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateDashboardInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDashboardInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDashboardInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if s.SourceEntity == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceEntity")) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(aws.ErrInvalidParams)) + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDashboardInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DashboardPublishOptions != nil { + v := s.DashboardPublishOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DashboardPublishOptions", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Parameters != nil { + v := s.Parameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Parameters", v, metadata) + } + if s.SourceEntity != nil { + v := s.SourceEntity + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceEntity", v, metadata) + } + if s.VersionDescription != nil { + v := *s.VersionDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDashboardOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the resource. + Arn *string `type:"string"` + + // The creation status of the request. + CreationStatus ResourceStatus `type:"string" enum:"true"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `type:"integer"` + + // The ARN of the dashboard, including the version number. 
+ VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s UpdateDashboardOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDashboardOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.CreationStatus) > 0 { + v := s.CreationStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Status != nil { + v := *s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.Int64Value(v), metadata) + } + if s.VersionArn != nil { + v := *s.VersionArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opUpdateDashboard = "UpdateDashboard" + +// UpdateDashboardRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates a dashboard in the AWS account. +// +// CLI syntax: +// +// aws quicksight update-dashboard --aws-account-id 111122223333 --dashboard-id +// 123123123 --dashboard-name "test-update102" --source-entity SourceTemplate={Arn=arn:aws:quicksight:us-west-2:111122223333:template/sales-report-template2} +// --data-set-references DataSetPlaceholder=SalesDataSet,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/0e251aef-9ebf-46e1-b852-eb4fa33c1d3a +// +// aws quicksight update-dashboard --cli-input-json file://update-dashboard.json +// +// // Example sending a request using UpdateDashboardRequest. +// req := client.UpdateDashboardRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboard +func (c *Client) UpdateDashboardRequest(input *UpdateDashboardInput) UpdateDashboardRequest { + op := &aws.Operation{ + Name: opUpdateDashboard, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + } + + if input == nil { + input = &UpdateDashboardInput{} + } + + req := c.newRequest(op, input, &UpdateDashboardOutput{}) + return UpdateDashboardRequest{Request: req, Input: input, Copy: c.UpdateDashboardRequest} +} + +// UpdateDashboardRequest is the request type for the +// UpdateDashboard API operation. +type UpdateDashboardRequest struct { + *aws.Request + Input *UpdateDashboardInput + Copy func(*UpdateDashboardInput) UpdateDashboardRequest +} + +// Send marshals and sends the UpdateDashboard API request. 
+func (r UpdateDashboardRequest) Send(ctx context.Context) (*UpdateDashboardResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDashboardResponse{ + UpdateDashboardOutput: r.Request.Data.(*UpdateDashboardOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDashboardResponse is the response type for the +// UpdateDashboard API operation. +type UpdateDashboardResponse struct { + *UpdateDashboardOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDashboard request. +func (r *UpdateDashboardResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDashboardPermissions.go b/service/quicksight/api_op_UpdateDashboardPermissions.go new file mode 100644 index 00000000000..fb0cf530a34 --- /dev/null +++ b/service/quicksight/api_op_UpdateDashboardPermissions.go @@ -0,0 +1,301 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDashboardPermissionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboard you are updating. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The permissions that you want to grant on this resource. + GrantPermissions []ResourcePermission `min:"1" type:"list"` + + // The permissions that you want to revoke from this resource. + RevokePermissions []ResourcePermission `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateDashboardPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDashboardPermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDashboardPermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GrantPermissions", 1)) + } + if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RevokePermissions", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDashboardPermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.GrantPermissions != nil { + v := s.GrantPermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "GrantPermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RevokePermissions != nil { + v := s.RevokePermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RevokePermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDashboardPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dashboard. + DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // Information about the permissions on the dashboard. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateDashboardPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDashboardPermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DashboardArn != nil { + v := *s.DashboardArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Permissions != nil { + v := s.Permissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Permissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateDashboardPermissions = "UpdateDashboardPermissions" + +// UpdateDashboardPermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates read and write permissions on a dashboard. +// +// CLI syntax: +// +// aws quicksight update-dashboard-permissions —cli-input-json file://update-permission.json +// +// A sample update-permissions.json for granting read only permissions: +// +// { "AwsAccountId": "111122223333", "DashboardId": "reports_test_report", "GrantPermissions": +// [ { "Principal": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2", +// "Actions": [ "quicksight:DescribeDashboard", "quicksight:ListDashboardVersions", +// "quicksight:DescribeDashboardVersion", "quicksight:QueryDashboard" ] } ] +// } +// +// A sample update-permissions.json for granting read and write permissions: +// +// { "AwsAccountId": "111122223333", "DashboardId": "reports_test_report", "GrantPermissions": +// [ { "Principal": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2", +// "Actions": [ "quicksight:DescribeDashboard", "quicksight:ListDashboardVersions", +// "quicksight:DescribeDashboardVersion", "quicksight:QueryDashboard", "quicksight:DescribeDashboardPermissions", +// "quicksight:UpdateDashboardPermissions", "quicksight:DeleteDashboardVersion", +// "quicksight:DeleteDashboard", "quicksight:UpdateDashboard", "quicksight:UpdateDashboardPublishedVersion", +// ] } ] } +// +// A sample update-permissions.json for revoking write permissions: +// +// { "AwsAccountId": "111122223333", "DashboardId": "reports_test_report", "RevokePermissions": +// [ { "Principal": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2", +// "Actions": [ "quicksight:DescribeDashboardPermissions", "quicksight:UpdateDashboardPermissions", +// "quicksight:DeleteDashboardVersion", "quicksight:DeleteDashboard", "quicksight:UpdateDashboard", +// "quicksight:UpdateDashboardPublishedVersion", ] } ] } +// +// A sample update-permissions.json for revoking read and write permissions: +// +// { "AwsAccountId": "111122223333", "DashboardId": "reports_test_report", "RevokePermissions": +// [ { "Principal": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2", +// "Actions": [ "quicksight:DescribeDashboard", "quicksight:ListDashboardVersions", +// "quicksight:DescribeDashboardVersion", "quicksight:QueryDashboard", "quicksight:DescribeDashboardPermissions", +// "quicksight:UpdateDashboardPermissions", 
"quicksight:DeleteDashboardVersion", +// "quicksight:DeleteDashboard", "quicksight:UpdateDashboard", "quicksight:UpdateDashboardPublishedVersion", +// ] } ] } +// +// To obtain the principal name of a QuickSight user or group, you can use describe-group +// or describe-user. For example: +// +// aws quicksight describe-user --aws-account-id 111122223333 --namespace default +// --user-name user2 --region us-east-1 { "User": { "Arn": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2", +// "Active": true, "Email": "user2@example.com", "Role": "ADMIN", "UserName": +// "user2", "PrincipalId": "federated/iam/abcd2abcdabcdeabc5ab5" }, "RequestId": +// "8f74bb31-6291-448a-a71c-a765a44bae31", "Status": 200 } +// +// // Example sending a request using UpdateDashboardPermissionsRequest. +// req := client.UpdateDashboardPermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPermissions +func (c *Client) UpdateDashboardPermissionsRequest(input *UpdateDashboardPermissionsInput) UpdateDashboardPermissionsRequest { + op := &aws.Operation{ + Name: opUpdateDashboardPermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", + } + + if input == nil { + input = &UpdateDashboardPermissionsInput{} + } + + req := c.newRequest(op, input, &UpdateDashboardPermissionsOutput{}) + return UpdateDashboardPermissionsRequest{Request: req, Input: input, Copy: c.UpdateDashboardPermissionsRequest} +} + +// UpdateDashboardPermissionsRequest is the request type for the +// UpdateDashboardPermissions API operation. +type UpdateDashboardPermissionsRequest struct { + *aws.Request + Input *UpdateDashboardPermissionsInput + Copy func(*UpdateDashboardPermissionsInput) UpdateDashboardPermissionsRequest +} + +// Send marshals and sends the UpdateDashboardPermissions API request. +func (r UpdateDashboardPermissionsRequest) Send(ctx context.Context) (*UpdateDashboardPermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDashboardPermissionsResponse{ + UpdateDashboardPermissionsOutput: r.Request.Data.(*UpdateDashboardPermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDashboardPermissionsResponse is the response type for the +// UpdateDashboardPermissions API operation. +type UpdateDashboardPermissionsResponse struct { + *UpdateDashboardPermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDashboardPermissions request. +func (r *UpdateDashboardPermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDashboardPublishedVersion.go b/service/quicksight/api_op_UpdateDashboardPublishedVersion.go new file mode 100644 index 00000000000..c95d142ecc9 --- /dev/null +++ b/service/quicksight/api_op_UpdateDashboardPublishedVersion.go @@ -0,0 +1,209 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDashboardPublishedVersionInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the dashboard you are updating. 
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. + // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The version number of the dashboard. + // + // VersionNumber is a required field + VersionNumber *int64 `location:"uri" locationName:"VersionNumber" min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s UpdateDashboardPublishedVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDashboardPublishedVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDashboardPublishedVersionInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DashboardId == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DashboardId", 1)) + } + + if s.VersionNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("VersionNumber")) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("VersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDashboardPublishedVersionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "VersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +type UpdateDashboardPublishedVersionOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dashboard. + DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateDashboardPublishedVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDashboardPublishedVersionOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.DashboardArn != nil {
+		v := *s.DashboardArn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "DashboardArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.DashboardId != nil {
+		v := *s.DashboardId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opUpdateDashboardPublishedVersion = "UpdateDashboardPublishedVersion"
+
+// UpdateDashboardPublishedVersionRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Updates the published version of a dashboard.
+//
+// CLI syntax:
+//
+// aws quicksight update-dashboard-published-version --aws-account-id 111122223333
+// --dashboard-id dashboard-w1 --version-number 2
+//
+// // Example sending a request using UpdateDashboardPublishedVersionRequest.
+// req := client.UpdateDashboardPublishedVersionRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPublishedVersion
+func (c *Client) UpdateDashboardPublishedVersionRequest(input *UpdateDashboardPublishedVersionInput) UpdateDashboardPublishedVersionRequest {
+	op := &aws.Operation{
+		Name: opUpdateDashboardPublishedVersion,
+		HTTPMethod: "PUT",
+		HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions/{VersionNumber}",
+	}
+
+	if input == nil {
+		input = &UpdateDashboardPublishedVersionInput{}
+	}
+
+	req := c.newRequest(op, input, &UpdateDashboardPublishedVersionOutput{})
+	return UpdateDashboardPublishedVersionRequest{Request: req, Input: input, Copy: c.UpdateDashboardPublishedVersionRequest}
+}
+
+// UpdateDashboardPublishedVersionRequest is the request type for the
+// UpdateDashboardPublishedVersion API operation.
+type UpdateDashboardPublishedVersionRequest struct {
+	*aws.Request
+	Input *UpdateDashboardPublishedVersionInput
+	Copy func(*UpdateDashboardPublishedVersionInput) UpdateDashboardPublishedVersionRequest
+}
+
+// Send marshals and sends the UpdateDashboardPublishedVersion API request.
+func (r UpdateDashboardPublishedVersionRequest) Send(ctx context.Context) (*UpdateDashboardPublishedVersionResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &UpdateDashboardPublishedVersionResponse{
+		UpdateDashboardPublishedVersionOutput: r.Request.Data.(*UpdateDashboardPublishedVersionOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// UpdateDashboardPublishedVersionResponse is the response type for the
+// UpdateDashboardPublishedVersion API operation.
+type UpdateDashboardPublishedVersionResponse struct {
+	*UpdateDashboardPublishedVersionOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// UpdateDashboardPublishedVersion request.
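The published-version update needs only the three path parameters declared on the input shape above. A small sketch under the same assumptions (existing client, placeholder identifiers from the CLI example):

```go
package quicksightexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// publishDashboardVersion points the published (default) version of a
// dashboard at version 2, mirroring the CLI example in the doc comment.
func publishDashboardVersion(ctx context.Context, client *quicksight.Client) error {
	req := client.UpdateDashboardPublishedVersionRequest(&quicksight.UpdateDashboardPublishedVersionInput{
		AwsAccountId:  aws.String("111122223333"),
		DashboardId:   aws.String("dashboard-w1"),
		VersionNumber: aws.Int64(2),
	})
	_, err := req.Send(ctx)
	return err
}
```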
+func (r *UpdateDashboardPublishedVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDataSet.go b/service/quicksight/api_op_UpdateDataSet.go new file mode 100644 index 00000000000..450f1487f57 --- /dev/null +++ b/service/quicksight/api_op_UpdateDataSet.go @@ -0,0 +1,373 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDataSetInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // Groupings of columns that work together in certain QuickSight features. Currently + // only geospatial hierarchy is supported. + ColumnGroups []ColumnGroup `min:"1" type:"list"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // Indicates whether or not you want to import the data into SPICE. + // + // ImportMode is a required field + ImportMode DataSetImportMode `type:"string" required:"true" enum:"true"` + + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]LogicalTable `min:"1" type:"map"` + + // The display name for the dataset. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Declares the physical tables that are available in the underlying data sources. + // + // PhysicalTableMap is a required field + PhysicalTableMap map[string]PhysicalTable `min:"1" type:"map" required:"true"` + + // Row-level security configuration on the data you want to create. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` +} + +// String returns the string representation +func (s UpdateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateDataSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDataSetInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnGroups", 1)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if len(s.ImportMode) == 0 { + invalidParams.Add(aws.NewErrParamRequired("ImportMode")) + } + if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LogicalTableMap", 1)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if s.PhysicalTableMap == nil { + invalidParams.Add(aws.NewErrParamRequired("PhysicalTableMap")) + } + if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PhysicalTableMap", 1)) + } + if s.ColumnGroups != nil { + for i, v := range s.ColumnGroups { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.LogicalTableMap != nil { + for i, v := range s.LogicalTableMap { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.PhysicalTableMap != nil { + for i, v := range s.PhysicalTableMap { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RowLevelPermissionDataSet != nil { + if err := s.RowLevelPermissionDataSet.Validate(); err != nil { + invalidParams.AddNested("RowLevelPermissionDataSet", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDataSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ColumnGroups != nil { + v := s.ColumnGroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnGroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if len(s.ImportMode) > 0 { + v := s.ImportMode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ImportMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.LogicalTableMap != nil { + v := s.LogicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "LogicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PhysicalTableMap != nil { + v := s.PhysicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "PhysicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.RowLevelPermissionDataSet != nil { + v := s.RowLevelPermissionDataSet + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RowLevelPermissionDataSet", v, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDataSetOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + Arn *string `type:"string"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + DataSetId *string `type:"string"` + + // The Amazon Resource Name (ARN) for the ingestion, which is triggered as a + // result of dataset creation if the import mode is SPICE + IngestionArn *string `type:"string"` + + // The ID of the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE + IngestionId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDataSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionArn != nil { + v := *s.IngestionArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateDataSet = "UpdateDataSet" + +// UpdateDataSetRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates a dataset. +// +// CLI syntax: +// +// aws quicksight update-data-set \ +// +// --aws-account-id=111122223333 \ +// +// --data-set-id=unique-data-set-id \ +// +// --name='My dataset' \ +// +// --import-mode=SPICE \ +// +// --physical-table-map='{ +// +// "physical-table-id": { +// +// "RelationalTable": { +// +// "DataSourceArn": "arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id", +// +// "Name": "table1", +// +// "InputColumns": [ +// +// { +// +// "Name": "column1", +// +// "Type": "STRING" +// +// } +// +// ] +// +// } +// +// }' +// +// // Example sending a request using UpdateDataSetRequest. +// req := client.UpdateDataSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSet +func (c *Client) UpdateDataSetRequest(input *UpdateDataSetInput) UpdateDataSetRequest { + op := &aws.Operation{ + Name: opUpdateDataSet, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + } + + if input == nil { + input = &UpdateDataSetInput{} + } + + req := c.newRequest(op, input, &UpdateDataSetOutput{}) + return UpdateDataSetRequest{Request: req, Input: input, Copy: c.UpdateDataSetRequest} +} + +// UpdateDataSetRequest is the request type for the +// UpdateDataSet API operation. +type UpdateDataSetRequest struct { + *aws.Request + Input *UpdateDataSetInput + Copy func(*UpdateDataSetInput) UpdateDataSetRequest +} + +// Send marshals and sends the UpdateDataSet API request. +func (r UpdateDataSetRequest) Send(ctx context.Context) (*UpdateDataSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDataSetResponse{ + UpdateDataSetOutput: r.Request.Data.(*UpdateDataSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDataSetResponse is the response type for the +// UpdateDataSet API operation. +type UpdateDataSetResponse struct { + *UpdateDataSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDataSet request. 
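The multi-line CLI example above maps onto nested Go structs. The sketch below is illustrative only: the `PhysicalTable`, `RelationalTable`, and `InputColumn` type names and the `DataSetImportModeSpice` constant are assumed from the JSON shape rather than taken from this diff, so verify them against the package's type definitions.

```go
package quicksightexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// updateSalesDataSet rebuilds the physical table map from the CLI example
// and switches the dataset to SPICE import mode.
func updateSalesDataSet(ctx context.Context, client *quicksight.Client) error {
	req := client.UpdateDataSetRequest(&quicksight.UpdateDataSetInput{
		AwsAccountId: aws.String("111122223333"),
		DataSetId:    aws.String("unique-data-set-id"),
		Name:         aws.String("My dataset"),
		ImportMode:   quicksight.DataSetImportModeSpice,
		PhysicalTableMap: map[string]quicksight.PhysicalTable{
			"physical-table-id": {
				RelationalTable: &quicksight.RelationalTable{
					DataSourceArn: aws.String("arn:aws:quicksight:us-west-2:111111111111:datasource/data-source-id"),
					Name:          aws.String("table1"),
					InputColumns: []quicksight.InputColumn{{
						Name: aws.String("column1"),
						Type: quicksight.InputColumnDataTypeString,
					}},
				},
			},
		},
	})
	_, err := req.Send(ctx)
	return err
}
```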
+func (r *UpdateDataSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDataSetPermissions.go b/service/quicksight/api_op_UpdateDataSetPermissions.go new file mode 100644 index 00000000000..86724dea625 --- /dev/null +++ b/service/quicksight/api_op_UpdateDataSetPermissions.go @@ -0,0 +1,251 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDataSetPermissionsInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // The resource permissions that you want to grant to the dataset. + GrantPermissions []ResourcePermission `min:"1" type:"list"` + + // The resource permissions that you want to revoke from the dataset. + RevokePermissions []ResourcePermission `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateDataSetPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataSetPermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDataSetPermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSetId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetId")) + } + if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GrantPermissions", 1)) + } + if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RevokePermissions", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDataSetPermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.GrantPermissions != nil { + v := s.GrantPermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "GrantPermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RevokePermissions != nil { + v := s.RevokePermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RevokePermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDataSetPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset. + DataSetArn *string `type:"string"` + + // The ID for the dataset you want to create. This is unique per region per + // AWS account. + DataSetId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateDataSetPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSetPermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSetArn != nil { + v := *s.DataSetArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateDataSetPermissions = "UpdateDataSetPermissions" + +// UpdateDataSetPermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates the permissions on a dataset. 
+// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id +// +// CLI syntax: +// +// aws quicksight update-data-set-permissions \ +// +// --aws-account-id=111122223333 \ +// +// --data-set-id=unique-data-set-id \ +// +// --grant-permissions='[{"Principal":"arn:aws:quicksight:us-east-1:111122223333:user/default/user1","Actions":["quicksight:DescribeDataSet","quicksight:DescribeDataSetPermissions","quicksight:PassDataSet","quicksight:ListIngestions","quicksight:DescribeIngestion"]}]' +// \ +// +// --revoke-permissions='[{"Principal":"arn:aws:quicksight:us-east-1:111122223333:user/default/user2","Actions":["quicksight:UpdateDataSet","quicksight:DeleteDataSet","quicksight:UpdateDataSetPermissions","quicksight:CreateIngestion","quicksight:CancelIngestion"]}]' +// +// // Example sending a request using UpdateDataSetPermissionsRequest. +// req := client.UpdateDataSetPermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSetPermissions +func (c *Client) UpdateDataSetPermissionsRequest(input *UpdateDataSetPermissionsInput) UpdateDataSetPermissionsRequest { + op := &aws.Operation{ + Name: opUpdateDataSetPermissions, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", + } + + if input == nil { + input = &UpdateDataSetPermissionsInput{} + } + + req := c.newRequest(op, input, &UpdateDataSetPermissionsOutput{}) + return UpdateDataSetPermissionsRequest{Request: req, Input: input, Copy: c.UpdateDataSetPermissionsRequest} +} + +// UpdateDataSetPermissionsRequest is the request type for the +// UpdateDataSetPermissions API operation. +type UpdateDataSetPermissionsRequest struct { + *aws.Request + Input *UpdateDataSetPermissionsInput + Copy func(*UpdateDataSetPermissionsInput) UpdateDataSetPermissionsRequest +} + +// Send marshals and sends the UpdateDataSetPermissions API request. +func (r UpdateDataSetPermissionsRequest) Send(ctx context.Context) (*UpdateDataSetPermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDataSetPermissionsResponse{ + UpdateDataSetPermissionsOutput: r.Request.Data.(*UpdateDataSetPermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDataSetPermissionsResponse is the response type for the +// UpdateDataSetPermissions API operation. +type UpdateDataSetPermissionsResponse struct { + *UpdateDataSetPermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDataSetPermissions request. +func (r *UpdateDataSetPermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDataSource.go b/service/quicksight/api_op_UpdateDataSource.go new file mode 100644 index 00000000000..5c18f6665b3 --- /dev/null +++ b/service/quicksight/api_op_UpdateDataSource.go @@ -0,0 +1,280 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDataSourceInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. 
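The grant and revoke JSON arrays in the CLI example correspond to the two `ResourcePermission` slices on the input shape. A minimal sketch using the same placeholder principals:

```go
package quicksightexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// swapDataSetPermissions grants user1 read access and revokes user2's write
// access on one dataset, following the CLI example above.
func swapDataSetPermissions(ctx context.Context, client *quicksight.Client) error {
	req := client.UpdateDataSetPermissionsRequest(&quicksight.UpdateDataSetPermissionsInput{
		AwsAccountId: aws.String("111122223333"),
		DataSetId:    aws.String("unique-data-set-id"),
		GrantPermissions: []quicksight.ResourcePermission{{
			Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/user1"),
			Actions: []string{
				"quicksight:DescribeDataSet",
				"quicksight:DescribeDataSetPermissions",
				"quicksight:PassDataSet",
				"quicksight:ListIngestions",
				"quicksight:DescribeIngestion",
			},
		}},
		RevokePermissions: []quicksight.ResourcePermission{{
			Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/user2"),
			Actions: []string{
				"quicksight:UpdateDataSet",
				"quicksight:DeleteDataSet",
				"quicksight:UpdateDataSetPermissions",
				"quicksight:CreateIngestion",
				"quicksight:CancelIngestion",
			},
		}},
	})
	_, err := req.Send(ctx)
	return err
}
```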
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The credentials QuickSight uses to connect to your underlying source. Currently + // only username/password based credentials are supported. + Credentials *DataSourceCredentials `type:"structure" sensitive:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + + // The parameters QuickSight uses to connect to your underlying source. + DataSourceParameters *DataSourceParameters `type:"structure"` + + // A display name for the data source. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // SSL properties that apply when QuickSight connects to your underlying source. + SslProperties *SslProperties `type:"structure"` + + // You need to use this parameter only when you want QuickSight to use a VPC + // connection when connecting to your underlying source. + VpcConnectionProperties *VpcConnectionProperties `type:"structure"` +} + +// String returns the string representation +func (s UpdateDataSourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataSourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDataSourceInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.Credentials != nil { + if err := s.Credentials.Validate(); err != nil { + invalidParams.AddNested("Credentials", err.(aws.ErrInvalidParams)) + } + } + if s.DataSourceParameters != nil { + if err := s.DataSourceParameters.Validate(); err != nil { + invalidParams.AddNested("DataSourceParameters", err.(aws.ErrInvalidParams)) + } + } + if s.VpcConnectionProperties != nil { + if err := s.VpcConnectionProperties.Validate(); err != nil { + invalidParams.AddNested("VpcConnectionProperties", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateDataSourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Credentials != nil { + v := s.Credentials + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Credentials", v, metadata) + } + if s.DataSourceParameters != nil { + v := s.DataSourceParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSourceParameters", v, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SslProperties != nil { + v := s.SslProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SslProperties", v, metadata) + } + if s.VpcConnectionProperties != nil { + v := s.VpcConnectionProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VpcConnectionProperties", v, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data source. + Arn *string `type:"string"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The update status of the data source's last update. + UpdateStatus ResourceStatus `type:"string" enum:"true"` +} + +// String returns the string representation +func (s UpdateDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.UpdateStatus) > 0 { + v := s.UpdateStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "UpdateStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateDataSource = "UpdateDataSource" + +// UpdateDataSourceRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates a data source. 
+// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id +// +// CLI syntax: +// +// aws quicksight update-data-source \ +// +// --aws-account-id=111122223333 \ +// +// --data-source-id=unique-data-source-id \ +// +// --name='My Data Source' \ +// +// --data-source-parameters='{"PostgreSqlParameters":{"Host":"my-db-host.example.com","Port":1234,"Database":"my-db"}}' +// \ +// +// --credentials='{"CredentialPair":{"Username":"username","Password":"password"}} +// +// // Example sending a request using UpdateDataSourceRequest. +// req := client.UpdateDataSourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSource +func (c *Client) UpdateDataSourceRequest(input *UpdateDataSourceInput) UpdateDataSourceRequest { + op := &aws.Operation{ + Name: opUpdateDataSource, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + } + + if input == nil { + input = &UpdateDataSourceInput{} + } + + req := c.newRequest(op, input, &UpdateDataSourceOutput{}) + return UpdateDataSourceRequest{Request: req, Input: input, Copy: c.UpdateDataSourceRequest} +} + +// UpdateDataSourceRequest is the request type for the +// UpdateDataSource API operation. +type UpdateDataSourceRequest struct { + *aws.Request + Input *UpdateDataSourceInput + Copy func(*UpdateDataSourceInput) UpdateDataSourceRequest +} + +// Send marshals and sends the UpdateDataSource API request. +func (r UpdateDataSourceRequest) Send(ctx context.Context) (*UpdateDataSourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDataSourceResponse{ + UpdateDataSourceOutput: r.Request.Data.(*UpdateDataSourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDataSourceResponse is the response type for the +// UpdateDataSource API operation. +type UpdateDataSourceResponse struct { + *UpdateDataSourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDataSource request. +func (r *UpdateDataSourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateDataSourcePermissions.go b/service/quicksight/api_op_UpdateDataSourcePermissions.go new file mode 100644 index 00000000000..f125014d64b --- /dev/null +++ b/service/quicksight/api_op_UpdateDataSourcePermissions.go @@ -0,0 +1,251 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateDataSourcePermissionsInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + + // A list of resource permissions that you want to grant on the data source. 
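The `--data-source-parameters` and `--credentials` JSON in the CLI example map onto the `DataSourceParameters` and `DataSourceCredentials` structures declared on the input. A rough sketch; the `PostgreSqlParameters` and `CredentialPair` type names are assumed from that JSON, and the host and credentials are placeholders.

```go
package quicksightexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// updatePostgresDataSource renames a data source and points it at a new
// PostgreSQL endpoint, mirroring the CLI example above.
func updatePostgresDataSource(ctx context.Context, client *quicksight.Client) error {
	req := client.UpdateDataSourceRequest(&quicksight.UpdateDataSourceInput{
		AwsAccountId: aws.String("111122223333"),
		DataSourceId: aws.String("unique-data-source-id"),
		Name:         aws.String("My Data Source"),
		DataSourceParameters: &quicksight.DataSourceParameters{
			PostgreSqlParameters: &quicksight.PostgreSqlParameters{
				Host:     aws.String("my-db-host.example.com"),
				Port:     aws.Int64(1234),
				Database: aws.String("my-db"),
			},
		},
		Credentials: &quicksight.DataSourceCredentials{
			CredentialPair: &quicksight.CredentialPair{
				Username: aws.String("username"),
				Password: aws.String("password"),
			},
		},
	})
	_, err := req.Send(ctx)
	return err
}
```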
+ GrantPermissions []ResourcePermission `min:"1" type:"list"` + + // A list of resource permissions that you want to revoke on the data source. + RevokePermissions []ResourcePermission `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateDataSourcePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataSourcePermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateDataSourcePermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.DataSourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceId")) + } + if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GrantPermissions", 1)) + } + if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RevokePermissions", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSourcePermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.GrantPermissions != nil { + v := s.GrantPermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "GrantPermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RevokePermissions != nil { + v := s.RevokePermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RevokePermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateDataSourcePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the data source. + DataSourceArn *string `type:"string"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateDataSourcePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateDataSourcePermissionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSourceArn != nil { + v := *s.DataSourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateDataSourcePermissions = "UpdateDataSourcePermissions" + +// UpdateDataSourcePermissionsRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates the permissions to a data source. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:datasource/data-source-id +// +// CLI syntax: +// +// aws quicksight update-data-source-permissions \ +// +// --aws-account-id=111122223333 \ +// +// --data-source-id=unique-data-source-id \ +// +// --name='My Data Source' \ +// +// --grant-permissions='[{"Principal":"arn:aws:quicksight:us-east-1:111122223333:user/default/user1","Actions":["quicksight:DescribeDataSource","quicksight:DescribeDataSourcePermissions","quicksight:PassDataSource"]}]' +// \ +// +// --revoke-permissions='[{"Principal":"arn:aws:quicksight:us-east-1:111122223333:user/default/user2","Actions":["quicksight:UpdateDataSource","quicksight:DeleteDataSource","quicksight:UpdateDataSourcePermissions"]}]' +// +// // Example sending a request using UpdateDataSourcePermissionsRequest. +// req := client.UpdateDataSourcePermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSourcePermissions +func (c *Client) UpdateDataSourcePermissionsRequest(input *UpdateDataSourcePermissionsInput) UpdateDataSourcePermissionsRequest { + op := &aws.Operation{ + Name: opUpdateDataSourcePermissions, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + } + + if input == nil { + input = &UpdateDataSourcePermissionsInput{} + } + + req := c.newRequest(op, input, &UpdateDataSourcePermissionsOutput{}) + return UpdateDataSourcePermissionsRequest{Request: req, Input: input, Copy: c.UpdateDataSourcePermissionsRequest} +} + +// UpdateDataSourcePermissionsRequest is the request type for the +// UpdateDataSourcePermissions API operation. +type UpdateDataSourcePermissionsRequest struct { + *aws.Request + Input *UpdateDataSourcePermissionsInput + Copy func(*UpdateDataSourcePermissionsInput) UpdateDataSourcePermissionsRequest +} + +// Send marshals and sends the UpdateDataSourcePermissions API request. 
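The same grant/revoke pattern applies to the data source permissions resource named above. A minimal sketch with the placeholder principals from the CLI example:

```go
package quicksightexamples // hypothetical package for these sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// swapDataSourcePermissions grants user1 read access to a data source and
// revokes user2's ability to modify it, following the CLI example above.
func swapDataSourcePermissions(ctx context.Context, client *quicksight.Client) error {
	req := client.UpdateDataSourcePermissionsRequest(&quicksight.UpdateDataSourcePermissionsInput{
		AwsAccountId: aws.String("111122223333"),
		DataSourceId: aws.String("unique-data-source-id"),
		GrantPermissions: []quicksight.ResourcePermission{{
			Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/user1"),
			Actions: []string{
				"quicksight:DescribeDataSource",
				"quicksight:DescribeDataSourcePermissions",
				"quicksight:PassDataSource",
			},
		}},
		RevokePermissions: []quicksight.ResourcePermission{{
			Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/user2"),
			Actions: []string{
				"quicksight:UpdateDataSource",
				"quicksight:DeleteDataSource",
				"quicksight:UpdateDataSourcePermissions",
			},
		}},
	})
	_, err := req.Send(ctx)
	return err
}
```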
+func (r UpdateDataSourcePermissionsRequest) Send(ctx context.Context) (*UpdateDataSourcePermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateDataSourcePermissionsResponse{ + UpdateDataSourcePermissionsOutput: r.Request.Data.(*UpdateDataSourcePermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateDataSourcePermissionsResponse is the response type for the +// UpdateDataSourcePermissions API operation. +type UpdateDataSourcePermissionsResponse struct { + *UpdateDataSourcePermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateDataSourcePermissions request. +func (r *UpdateDataSourcePermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateIAMPolicyAssignment.go b/service/quicksight/api_op_UpdateIAMPolicyAssignment.go new file mode 100644 index 00000000000..9a4bb3877ed --- /dev/null +++ b/service/quicksight/api_op_UpdateIAMPolicyAssignment.go @@ -0,0 +1,303 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. It must be unique within an AWS account. + // + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` + + // The status of an assignment: + // + // * ENABLED - Anything specified in this assignment is used while creating + // the data source. + // + // * DISABLED - This assignment isn't used while creating the data source. + // + // * DRAFT - Assignment is an unfinished draft and isn't used while creating + // the data source. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` + + // The AWS account ID that contains the IAM policy assignment. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // QuickSight users and/or groups that you want to assign to the specified IAM + // policy. + Identities map[string][]string `type:"map"` + + // The namespace of the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // An IAM policy ARN that will be applied to specified QuickSight users and + // groups in this assignment. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s UpdateIAMPolicyAssignmentInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateIAMPolicyAssignmentInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateIAMPolicyAssignmentInput"} + + if s.AssignmentName == nil { + invalidParams.Add(aws.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AssignmentName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.Namespace == nil { + invalidParams.Add(aws.NewErrParamRequired("Namespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateIAMPolicyAssignmentInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Identities != nil { + v := s.Identities + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Identities", metadata) + ms0.Start() + for k1, v1 := range v { + ls1 := ms0.List(k1) + ls1.Start() + for _, v2 := range v1 { + ls1.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v2)}) + } + ls1.End() + } + ms0.End() + + } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Namespace != nil { + v := *s.Namespace + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Namespace", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` + + // The ID of the assignment. + AssignmentId *string `type:"string"` + + // The name of the assignment. + AssignmentName *string `min:"1" type:"string"` + + // The status of the assignment: + // + // * ENABLED - Anything specified in this assignment is used while creating + // the data source. + // + // * DISABLED - This assignment isn't used while creating the data source. + // + // * DRAFT - Assignment is an unfinished draft and isn't used while creating + // the data source. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` + + // QuickSight users and/or groups that are assigned to this IAM policy. + Identities map[string][]string `type:"map"` + + // The IAM policy ARN assigned to the QuickSight users and groups specified + // in this request. + PolicyArn *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateIAMPolicyAssignmentOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateIAMPolicyAssignmentOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.AssignmentId != nil { + v := *s.AssignmentId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Identities != nil { + v := s.Identities + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Identities", metadata) + ms0.Start() + for k1, v1 := range v { + ls1 := ms0.List(k1) + ls1.Start() + for _, v2 := range v1 { + ls1.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v2)}) + } + ls1.End() + } + ms0.End() + + } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RequestId != nil { + v := *s.RequestId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + // ignoring invalid encode state, StatusCode. Status + return nil +} + +const opUpdateIAMPolicyAssignment = "UpdateIAMPolicyAssignment" + +// UpdateIAMPolicyAssignmentRequest returns a request value for making API operation for +// Amazon QuickSight. +// +// Updates an existing assignment. This operation updates only the optional +// parameter or parameters that are specified in the request. +// +// CLI syntax: +// +// aws quicksight update-iam-policy-assignment --aws-account-id=111122223333 +// --assignment-name=FullAccessAssignment --assignment-status=DRAFT --policy-arns=arn:aws:iam::aws:policy/AdministratorAccess +// --identities="user=user-1,user-2,group=admin" --namespace=default --region=us-east-1 +// +// // Example sending a request using UpdateIAMPolicyAssignmentRequest. +// req := client.UpdateIAMPolicyAssignmentRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateIAMPolicyAssignment +func (c *Client) UpdateIAMPolicyAssignmentRequest(input *UpdateIAMPolicyAssignmentInput) UpdateIAMPolicyAssignmentRequest { + op := &aws.Operation{ + Name: opUpdateIAMPolicyAssignment, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", + } + + if input == nil { + input = &UpdateIAMPolicyAssignmentInput{} + } + + req := c.newRequest(op, input, &UpdateIAMPolicyAssignmentOutput{}) + return UpdateIAMPolicyAssignmentRequest{Request: req, Input: input, Copy: c.UpdateIAMPolicyAssignmentRequest} +} + +// UpdateIAMPolicyAssignmentRequest is the request type for the +// UpdateIAMPolicyAssignment API operation. 
+type UpdateIAMPolicyAssignmentRequest struct { + *aws.Request + Input *UpdateIAMPolicyAssignmentInput + Copy func(*UpdateIAMPolicyAssignmentInput) UpdateIAMPolicyAssignmentRequest +} + +// Send marshals and sends the UpdateIAMPolicyAssignment API request. +func (r UpdateIAMPolicyAssignmentRequest) Send(ctx context.Context) (*UpdateIAMPolicyAssignmentResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateIAMPolicyAssignmentResponse{ + UpdateIAMPolicyAssignmentOutput: r.Request.Data.(*UpdateIAMPolicyAssignmentOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateIAMPolicyAssignmentResponse is the response type for the +// UpdateIAMPolicyAssignment API operation. +type UpdateIAMPolicyAssignmentResponse struct { + *UpdateIAMPolicyAssignmentOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateIAMPolicyAssignment request. +func (r *UpdateIAMPolicyAssignmentResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateTemplate.go b/service/quicksight/api_op_UpdateTemplate.go new file mode 100644 index 00000000000..08942810201 --- /dev/null +++ b/service/quicksight/api_op_UpdateTemplate.go @@ -0,0 +1,263 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateTemplateInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the template you are updating. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The name for the template. + Name *string `min:"1" type:"string"` + + // The source QuickSight entity from which this template is being created. Templates + // can be currently created from an Analysis or another template. + // + // SourceEntity is a required field + SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // A description of the current template version being created. This API created + // the first version of the template. Every time UpdateTemplate is called a + // new version is created. Each version of the template maintains a description + // of the version in the VersionDescription field. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateTemplateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateTemplateInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if s.SourceEntity == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceEntity")) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VersionDescription", 1)) + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateTemplateInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceEntity != nil { + v := s.SourceEntity + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceEntity", v, metadata) + } + if s.VersionDescription != nil { + v := *s.VersionDescription + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionDescription", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateTemplateOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the template. + Arn *string `type:"string"` + + // The creation status of the template. + CreationStatus ResourceStatus `type:"string" enum:"true"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) for the template, including the version information + // of the first version. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s UpdateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateTemplateOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.Arn != nil {
+		v := *s.Arn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if len(s.CreationStatus) > 0 {
+		v := s.CreationStatus
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "CreationStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata)
+	}
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateId != nil {
+		v := *s.TemplateId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.VersionArn != nil {
+		v := *s.VersionArn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "VersionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opUpdateTemplate = "UpdateTemplate"
+
+// UpdateTemplateRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Updates a template from an existing QuickSight analysis.
+//
+// CLI syntax:
+//
+//    aws quicksight update-template --aws-account-id 111122223333 --template-id
+//    reports_test_template --data-set-references DataSetPlaceholder=reports,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/c684a204-d134-4c53-a63c-451f72c60c28
+//    DataSetPlaceholder=Elblogs,DataSetArn=arn:aws:quicksight:us-west-2:111122223333:dataset/15840b7d-b542-4491-937b-602416b367b3
+//    --source-entity SourceAnalysis='{Arn=arn:aws:quicksight:us-west-2:111122223333:analysis/c5731fe9-4708-4598-8f6d-cf2a70875b6d}'
+//
+// You can also pass in a JSON file: aws quicksight update-template --cli-input-json
+// file://create-template.json
+//
+//    // Example sending a request using UpdateTemplateRequest.
+//    req := client.UpdateTemplateRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplate
+func (c *Client) UpdateTemplateRequest(input *UpdateTemplateInput) UpdateTemplateRequest {
+	op := &aws.Operation{
+		Name:       opUpdateTemplate,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/accounts/{AwsAccountId}/templates/{TemplateId}",
+	}
+
+	if input == nil {
+		input = &UpdateTemplateInput{}
+	}
+
+	req := c.newRequest(op, input, &UpdateTemplateOutput{})
+	return UpdateTemplateRequest{Request: req, Input: input, Copy: c.UpdateTemplateRequest}
+}
+
+// UpdateTemplateRequest is the request type for the
+// UpdateTemplate API operation.
+type UpdateTemplateRequest struct {
+	*aws.Request
+	Input *UpdateTemplateInput
+	Copy  func(*UpdateTemplateInput) UpdateTemplateRequest
+}
+
+// Send marshals and sends the UpdateTemplate API request.
+func (r UpdateTemplateRequest) Send(ctx context.Context) (*UpdateTemplateResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &UpdateTemplateResponse{
+		UpdateTemplateOutput: r.Request.Data.(*UpdateTemplateOutput),
+		response:             &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// UpdateTemplateResponse is the response type for the
+// UpdateTemplate API operation.
+type UpdateTemplateResponse struct { + *UpdateTemplateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateTemplate request. +func (r *UpdateTemplateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateTemplateAlias.go b/service/quicksight/api_op_UpdateTemplateAlias.go new file mode 100644 index 00000000000..7afb12468af --- /dev/null +++ b/service/quicksight/api_op_UpdateTemplateAlias.go @@ -0,0 +1,218 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateTemplateAliasInput struct { + _ struct{} `type:"structure"` + + // The alias name. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // AWS account ID that contains the template aliases you are updating. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // The version number of the template. + // + // TemplateVersionNumber is a required field + TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s UpdateTemplateAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTemplateAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateTemplateAliasInput"} + + if s.AliasName == nil { + invalidParams.Add(aws.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AliasName", 1)) + } + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + + if s.TemplateVersionNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateVersionNumber")) + } + if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { + invalidParams.Add(aws.NewErrParamMinValue("TemplateVersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateTemplateAliasInput) MarshalFields(e protocol.FieldEncoder) error {
+	e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+	if s.TemplateVersionNumber != nil {
+		v := *s.TemplateVersionNumber
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "TemplateVersionNumber", protocol.Int64Value(v), metadata)
+	}
+	if s.AliasName != nil {
+		v := *s.AliasName
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.PathTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.AwsAccountId != nil {
+		v := *s.AwsAccountId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateId != nil {
+		v := *s.TemplateId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	return nil
+}
+
+type UpdateTemplateAliasOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS request ID for this operation.
+	RequestId *string `type:"string"`
+
+	// The http status of the request.
+	Status *int64 `location:"statusCode" type:"integer"`
+
+	// The template alias.
+	TemplateAlias *TemplateAlias `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateTemplateAliasOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s UpdateTemplateAliasOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateAlias != nil {
+		v := s.TemplateAlias
+
+		metadata := protocol.Metadata{}
+		e.SetFields(protocol.BodyTarget, "TemplateAlias", v, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opUpdateTemplateAlias = "UpdateTemplateAlias"
+
+// UpdateTemplateAliasRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Updates the template alias of a template.
+//
+// CLI syntax:
+//
+//    aws quicksight update-template-alias --aws-account-id 111122223333 --template-id
+//    'reports_test_template' --alias-name STAGING --template-version-number 2
+//
+//    // Example sending a request using UpdateTemplateAliasRequest.
+//    req := client.UpdateTemplateAliasRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplateAlias
+func (c *Client) UpdateTemplateAliasRequest(input *UpdateTemplateAliasInput) UpdateTemplateAliasRequest {
+	op := &aws.Operation{
+		Name:       opUpdateTemplateAlias,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}",
+	}
+
+	if input == nil {
+		input = &UpdateTemplateAliasInput{}
+	}
+
+	req := c.newRequest(op, input, &UpdateTemplateAliasOutput{})
+	return UpdateTemplateAliasRequest{Request: req, Input: input, Copy: c.UpdateTemplateAliasRequest}
+}
+
+// UpdateTemplateAliasRequest is the request type for the
+// UpdateTemplateAlias API operation.
+type UpdateTemplateAliasRequest struct { + *aws.Request + Input *UpdateTemplateAliasInput + Copy func(*UpdateTemplateAliasInput) UpdateTemplateAliasRequest +} + +// Send marshals and sends the UpdateTemplateAlias API request. +func (r UpdateTemplateAliasRequest) Send(ctx context.Context) (*UpdateTemplateAliasResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateTemplateAliasResponse{ + UpdateTemplateAliasOutput: r.Request.Data.(*UpdateTemplateAliasOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateTemplateAliasResponse is the response type for the +// UpdateTemplateAlias API operation. +type UpdateTemplateAliasResponse struct { + *UpdateTemplateAliasOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateTemplateAlias request. +func (r *UpdateTemplateAliasResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateTemplatePermissions.go b/service/quicksight/api_op_UpdateTemplatePermissions.go new file mode 100644 index 00000000000..3eeb6cf1c76 --- /dev/null +++ b/service/quicksight/api_op_UpdateTemplatePermissions.go @@ -0,0 +1,279 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UpdateTemplatePermissionsInput struct { + _ struct{} `type:"structure"` + + // AWS account ID that contains the template. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A list of resource permissions to be granted on the template. The following + // example shows the shorthand syntax: + // + // Shorthand Syntax: Principal=string,Actions=string,string ... + GrantPermissions []ResourcePermission `min:"1" type:"list"` + + // A list of resource permissions to be revoked from the template. Shorthand + // syntax: Shorthand Syntax: Principal=string,Actions=string,string ... + RevokePermissions []ResourcePermission `min:"1" type:"list"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTemplatePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateTemplatePermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateTemplatePermissionsInput"} + + if s.AwsAccountId == nil { + invalidParams.Add(aws.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(aws.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GrantPermissions", 1)) + } + if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RevokePermissions", 1)) + } + + if s.TemplateId == nil { + invalidParams.Add(aws.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TemplateId", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateTemplatePermissionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.GrantPermissions != nil { + v := s.GrantPermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "GrantPermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.RevokePermissions != nil { + v := s.RevokePermissions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "RevokePermissions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type UpdateTemplatePermissionsOutput struct { + _ struct{} `type:"structure"` + + // A list of resource permissions to be set on the template. + Permissions []ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The http status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ARN of the template. + TemplateArn *string `type:"string"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateTemplatePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateTemplatePermissionsOutput) MarshalFields(e protocol.FieldEncoder) error {
+	if s.Permissions != nil {
+		v := s.Permissions
+
+		metadata := protocol.Metadata{}
+		ls0 := e.List(protocol.BodyTarget, "Permissions", metadata)
+		ls0.Start()
+		for _, v1 := range v {
+			ls0.ListAddFields(v1)
+		}
+		ls0.End()
+
+	}
+	if s.RequestId != nil {
+		v := *s.RequestId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "RequestId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateArn != nil {
+		v := *s.TemplateArn
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "TemplateArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	if s.TemplateId != nil {
+		v := *s.TemplateId
+
+		metadata := protocol.Metadata{}
+		e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+	}
+	// ignoring invalid encode state, StatusCode. Status
+	return nil
+}
+
+const opUpdateTemplatePermissions = "UpdateTemplatePermissions"
+
+// UpdateTemplatePermissionsRequest returns a request value for making API operation for
+// Amazon QuickSight.
+//
+// Updates the permissions on a template.
+//
+// CLI syntax:
+//
+//    * aws quicksight describe-template-permissions --aws-account-id 111122223333
+//    --template-id reports_test_template
+//
+//    * aws quicksight update-template-permissions --cli-input-json file://update-permission.json
+//
+//    * The structure of update-permissions.json to add permissions: { "AwsAccountId":
+//    "111122223333", "TemplateId": "reports_test_template", "GrantPermissions":
+//    [ { "Principal": "arn:aws:quicksight:us-east-1:196359894473:user/default/user3",
+//    "Actions": [ "quicksight:DescribeTemplate", "quicksight:ListTemplateVersions"
+//    ] } ] } The structure of update-permissions.json to revoke permissions: {
+//    "AwsAccountId": "111122223333", "TemplateId": "reports_test_template",
+//    "RevokePermissions": [ { "Principal": "arn:aws:quicksight:us-east-1:196359894473:user/default/user3",
+//    "Actions": [ "quicksight:DescribeTemplate", "quicksight:ListTemplateVersions"
+//    ] } ] } To obtain the principal name of a QuickSight group or user, use
+//    describe-group or describe-user. For example: aws quicksight describe-user
+//    --aws-account-id 111122223333 --namespace default --user-name user2 --region
+//    us-east-1 { "User": { "Arn": "arn:aws:quicksight:us-east-1:111122223333:user/default/user2",
+//    "Active": true, "Email": "user2@example.com", "Role": "ADMIN", "UserName":
+//    "user2", "PrincipalId": "federated/iam/abcd2abcdabcdeabc5ab5" }, "RequestId":
+//    "8f74bb31-6291-448a-a71c-a765a44bae31", "Status": 200 }
+//
+//    // Example sending a request using UpdateTemplatePermissionsRequest.
+// req := client.UpdateTemplatePermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplatePermissions +func (c *Client) UpdateTemplatePermissionsRequest(input *UpdateTemplatePermissionsInput) UpdateTemplatePermissionsRequest { + op := &aws.Operation{ + Name: opUpdateTemplatePermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", + } + + if input == nil { + input = &UpdateTemplatePermissionsInput{} + } + + req := c.newRequest(op, input, &UpdateTemplatePermissionsOutput{}) + return UpdateTemplatePermissionsRequest{Request: req, Input: input, Copy: c.UpdateTemplatePermissionsRequest} +} + +// UpdateTemplatePermissionsRequest is the request type for the +// UpdateTemplatePermissions API operation. +type UpdateTemplatePermissionsRequest struct { + *aws.Request + Input *UpdateTemplatePermissionsInput + Copy func(*UpdateTemplatePermissionsInput) UpdateTemplatePermissionsRequest +} + +// Send marshals and sends the UpdateTemplatePermissions API request. +func (r UpdateTemplatePermissionsRequest) Send(ctx context.Context) (*UpdateTemplatePermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateTemplatePermissionsResponse{ + UpdateTemplatePermissionsOutput: r.Request.Data.(*UpdateTemplatePermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateTemplatePermissionsResponse is the response type for the +// UpdateTemplatePermissions API operation. +type UpdateTemplatePermissionsResponse struct { + *UpdateTemplatePermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateTemplatePermissions request. +func (r *UpdateTemplatePermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/quicksight/api_op_UpdateUser.go b/service/quicksight/api_op_UpdateUser.go index 306f4335b9c..9210e019354 100644 --- a/service/quicksight/api_op_UpdateUser.go +++ b/service/quicksight/api_op_UpdateUser.go @@ -33,7 +33,7 @@ type UpdateUserInput struct { // // * READER: A user who has read-only access to dashboards. // - // * AUTHOR: A user who can create data sources, data sets, analyses, and + // * AUTHOR: A user who can create data sources, datasets, analyses, and // dashboards. // // * ADMIN: A user who is an author, who can also manage Amazon QuickSight @@ -168,15 +168,13 @@ const opUpdateUser = "UpdateUser" // // Updates an Amazon QuickSight user. // -// The permission resource is arn:aws:quicksight:us-east-1::user/default/ . -// // The response is a user object that contains the user's Amazon QuickSight // user name, email address, active or inactive status in Amazon QuickSight, // Amazon QuickSight role, and Amazon Resource Name (ARN). // // CLI Sample: // -// aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@amazon.com +// aws quicksight update-user --user-name=Pat --role=ADMIN --email=new_address@example.com // --aws-account-id=111122223333 --namespace=default --region=us-east-1 // // // Example sending a request using UpdateUserRequest. 
diff --git a/service/quicksight/api_types.go b/service/quicksight/api_types.go index a56d56556d7..c0cce44f1fc 100644 --- a/service/quicksight/api_types.go +++ b/service/quicksight/api_types.go @@ -3,45 +3,5475 @@ package quicksight import ( + "fmt" + "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/internal/awsutil" "github.com/aws/aws-sdk-go-v2/private/protocol" ) -var _ aws.Config -var _ = awsutil.Prettify +var _ aws.Config +var _ = awsutil.Prettify + +// The active IAM policy assignment. +type ActiveIAMPolicyAssignment struct { + _ struct{} `type:"structure"` + + // A name for the IAM policy assignment. + AssignmentName *string `min:"1" type:"string"` + + // The ARN of the resource. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s ActiveIAMPolicyAssignment) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ActiveIAMPolicyAssignment) MarshalFields(e protocol.FieldEncoder) error { + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Ad hoc filtering option. +type AdHocFilteringOption struct { + _ struct{} `type:"structure"` + + // Availability status. + AvailabilityStatus DashboardBehavior `type:"string" enum:"true"` +} + +// String returns the string representation +func (s AdHocFilteringOption) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AdHocFilteringOption) MarshalFields(e protocol.FieldEncoder) error { + if len(s.AvailabilityStatus) > 0 { + v := s.AvailabilityStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AvailabilityStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Amazon Elasticsearch parameters. +type AmazonElasticsearchParameters struct { + _ struct{} `type:"structure"` + + // The Amazon Elasticsearch domain. + // + // Domain is a required field + Domain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AmazonElasticsearchParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AmazonElasticsearchParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AmazonElasticsearchParameters"} + + if s.Domain == nil { + invalidParams.Add(aws.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Domain", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AmazonElasticsearchParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Domain != nil { + v := *s.Domain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Domain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Athena parameters. 
+type AthenaParameters struct { + _ struct{} `type:"structure"` + + // The workgroup that Athena uses. + WorkGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AthenaParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AthenaParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AthenaParameters"} + if s.WorkGroup != nil && len(*s.WorkGroup) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("WorkGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AthenaParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.WorkGroup != nil { + v := *s.WorkGroup + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "WorkGroup", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Aurora parameters. +type AuroraParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AuroraParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuroraParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AuroraParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AuroraParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// Aurora PostgreSQL parameters. +type AuroraPostgreSqlParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. 
+ // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AuroraPostgreSqlParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuroraPostgreSqlParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AuroraPostgreSqlParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AuroraPostgreSqlParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// AWS IoT Analytics parameters. +type AwsIotAnalyticsParameters struct { + _ struct{} `type:"structure"` + + // Dataset name. + // + // DataSetName is a required field + DataSetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AwsIotAnalyticsParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsIotAnalyticsParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AwsIotAnalyticsParameters"} + + if s.DataSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetName")) + } + if s.DataSetName != nil && len(*s.DataSetName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DataSetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s AwsIotAnalyticsParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSetName != nil { + v := *s.DataSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A calculated column for a dataset. +type CalculatedColumn struct { + _ struct{} `type:"structure"` + + // A unique ID to identify a calculated column. During dataset update, if the + // column ID of a calculated column matches that of an existing calculated column, + // QuickSight preserves the existing calculated column. + // + // ColumnId is a required field + ColumnId *string `min:"1" type:"string" required:"true"` + + // Column name. 
+ // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // An expression that defines the calculated column. + // + // Expression is a required field + Expression *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CalculatedColumn) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CalculatedColumn) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CalculatedColumn"} + + if s.ColumnId == nil { + invalidParams.Add(aws.NewErrParamRequired("ColumnId")) + } + if s.ColumnId != nil && len(*s.ColumnId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnId", 1)) + } + + if s.ColumnName == nil { + invalidParams.Add(aws.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnName", 1)) + } + + if s.Expression == nil { + invalidParams.Add(aws.NewErrParamRequired("Expression")) + } + if s.Expression != nil && len(*s.Expression) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Expression", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CalculatedColumn) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnId != nil { + v := *s.ColumnId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ColumnName != nil { + v := *s.ColumnName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Expression != nil { + v := *s.Expression + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Expression", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A transform operation that casts a column to a different type. +type CastColumnTypeOperation struct { + _ struct{} `type:"structure"` + + // Column name. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // When casting a column from string to datetime type, you can supply a QuickSight + // supported format string to denote the source data format. + Format *string `type:"string"` + + // New column data type. + // + // NewColumnType is a required field + NewColumnType ColumnDataType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s CastColumnTypeOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CastColumnTypeOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CastColumnTypeOperation"} + + if s.ColumnName == nil { + invalidParams.Add(aws.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnName", 1)) + } + if len(s.NewColumnType) == 0 { + invalidParams.Add(aws.NewErrParamRequired("NewColumnType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CastColumnTypeOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnName != nil { + v := *s.ColumnName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Format != nil { + v := *s.Format + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Format", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.NewColumnType) > 0 { + v := s.NewColumnType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NewColumnType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Groupings of columns that work together in certain QuickSight features. This +// is a variant type structure. No more than one of the attributes should be +// non-null for this structure to be valid. +type ColumnGroup struct { + _ struct{} `type:"structure"` + + // Geospatial column group that denotes a hierarchy. + GeoSpatialColumnGroup *GeoSpatialColumnGroup `type:"structure"` +} + +// String returns the string representation +func (s ColumnGroup) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ColumnGroup) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ColumnGroup"} + if s.GeoSpatialColumnGroup != nil { + if err := s.GeoSpatialColumnGroup.Validate(); err != nil { + invalidParams.AddNested("GeoSpatialColumnGroup", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ColumnGroup) MarshalFields(e protocol.FieldEncoder) error { + if s.GeoSpatialColumnGroup != nil { + v := s.GeoSpatialColumnGroup + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "GeoSpatialColumnGroup", v, metadata) + } + return nil +} + +// A structure describing the name, datatype, and geographic role of the columns. +type ColumnGroupColumnSchema struct { + _ struct{} `type:"structure"` + + // The name of the column group's column schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnGroupColumnSchema) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ColumnGroupColumnSchema) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The column group schema. +type ColumnGroupSchema struct { + _ struct{} `type:"structure"` + + // A structure containing the list of column group column schemas. + ColumnGroupColumnSchemaList []ColumnGroupColumnSchema `type:"list"` + + // The name of the column group schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnGroupSchema) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ColumnGroupSchema) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnGroupColumnSchemaList != nil { + v := s.ColumnGroupColumnSchemaList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnGroupColumnSchemaList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The column schema. +type ColumnSchema struct { + _ struct{} `type:"structure"` + + // The data type of the column schema. + DataType *string `type:"string"` + + // The geographic role of the column schema. + GeographicRole *string `type:"string"` + + // The name of the column schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnSchema) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ColumnSchema) MarshalFields(e protocol.FieldEncoder) error { + if s.DataType != nil { + v := *s.DataType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataType", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.GeographicRole != nil { + v := *s.GeographicRole + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "GeographicRole", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A tag for a column in a TagColumnOperation. This is a variant type structure. +// No more than one of the attributes should be non-null for this structure +// to be valid. +type ColumnTag struct { + _ struct{} `type:"structure"` + + // A geospatial role for a column. + ColumnGeographicRole GeoSpatialDataRole `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ColumnTag) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ColumnTag) MarshalFields(e protocol.FieldEncoder) error { + if len(s.ColumnGeographicRole) > 0 { + v := s.ColumnGeographicRole + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnGeographicRole", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// A transform operation that creates calculated columns. Columns created in +// one such operation form a lexical closure. +type CreateColumnsOperation struct { + _ struct{} `type:"structure"` + + // Calculated columns to create. + // + // Columns is a required field + Columns []CalculatedColumn `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateColumnsOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateColumnsOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateColumnsOperation"} + + if s.Columns == nil { + invalidParams.Add(aws.NewErrParamRequired("Columns")) + } + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Columns", 1)) + } + if s.Columns != nil { + for i, v := range s.Columns { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateColumnsOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.Columns != nil { + v := s.Columns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Columns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// The combination of username and password that are used as credentials. +type CredentialPair struct { + _ struct{} `type:"structure"` + + // Password. + // + // Password is a required field + Password *string `min:"1" type:"string" required:"true"` + + // Username. + // + // Username is a required field + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CredentialPair) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CredentialPair) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CredentialPair"} + + if s.Password == nil { + invalidParams.Add(aws.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Password", 1)) + } + + if s.Username == nil { + invalidParams.Add(aws.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Username", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CredentialPair) MarshalFields(e protocol.FieldEncoder) error { + if s.Password != nil { + v := *s.Password + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Password", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Username != nil { + v := *s.Username + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Username", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A physical table type built from the results of the custom SQL query. +type CustomSql struct { + _ struct{} `type:"structure"` + + // The column schema from the SQL query result set. + Columns []InputColumn `min:"1" type:"list"` + + // The ARN of the data source. + // + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` + + // A display name for the SQL query result. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The SQL query. 
+ // + // SqlQuery is a required field + SqlQuery *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CustomSql) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomSql) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CustomSql"} + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Columns", 1)) + } + + if s.DataSourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceArn")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if s.SqlQuery == nil { + invalidParams.Add(aws.NewErrParamRequired("SqlQuery")) + } + if s.SqlQuery != nil && len(*s.SqlQuery) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SqlQuery", 1)) + } + if s.Columns != nil { + for i, v := range s.Columns { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CustomSql) MarshalFields(e protocol.FieldEncoder) error { + if s.Columns != nil { + v := s.Columns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Columns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSourceArn != nil { + v := *s.DataSourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SqlQuery != nil { + v := *s.SqlQuery + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SqlQuery", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Dashboard. +type Dashboard struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // Dashboard ID. + DashboardId *string `min:"1" type:"string"` + + // The last time this was published. + LastPublishedTime *time.Time `type:"timestamp"` + + // The last time this was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // Version. + Version *DashboardVersion `type:"structure"` +} + +// String returns the string representation +func (s Dashboard) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Dashboard) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.LastPublishedTime != nil { + v := *s.LastPublishedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastPublishedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Version != nil { + v := s.Version + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Version", v, metadata) + } + return nil +} + +// Dashboard error. +type DashboardError struct { + _ struct{} `type:"structure"` + + // Message. + Message *string `type:"string"` + + // Type. + Type DashboardErrorType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DashboardError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DashboardError) MarshalFields(e protocol.FieldEncoder) error { + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Dashboard publish options. +type DashboardPublishOptions struct { + _ struct{} `type:"structure"` + + // Ad hoc filtering option. + AdHocFilteringOption *AdHocFilteringOption `type:"structure"` + + // Export to CSV option. + ExportToCSVOption *ExportToCSVOption `type:"structure"` + + // Sheet controls option. + SheetControlsOption *SheetControlsOption `type:"structure"` +} + +// String returns the string representation +func (s DashboardPublishOptions) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DashboardPublishOptions) MarshalFields(e protocol.FieldEncoder) error { + if s.AdHocFilteringOption != nil { + v := s.AdHocFilteringOption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AdHocFilteringOption", v, metadata) + } + if s.ExportToCSVOption != nil { + v := s.ExportToCSVOption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExportToCSVOption", v, metadata) + } + if s.SheetControlsOption != nil { + v := s.SheetControlsOption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SheetControlsOption", v, metadata) + } + return nil +} + +// Dashboard source entity. +type DashboardSourceEntity struct { + _ struct{} `type:"structure"` + + // Source template. + SourceTemplate *DashboardSourceTemplate `type:"structure"` +} + +// String returns the string representation +func (s DashboardSourceEntity) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DashboardSourceEntity) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DashboardSourceEntity"} + if s.SourceTemplate != nil { + if err := s.SourceTemplate.Validate(); err != nil { + invalidParams.AddNested("SourceTemplate", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DashboardSourceEntity) MarshalFields(e protocol.FieldEncoder) error { + if s.SourceTemplate != nil { + v := s.SourceTemplate + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceTemplate", v, metadata) + } + return nil +} + +// Dashboard source template. +type DashboardSourceTemplate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // Dataset references. + // + // DataSetReferences is a required field + DataSetReferences []DataSetReference `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DashboardSourceTemplate) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DashboardSourceTemplate) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DashboardSourceTemplate"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + + if s.DataSetReferences == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetReferences")) + } + if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DataSetReferences", 1)) + } + if s.DataSetReferences != nil { + for i, v := range s.DataSetReferences { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
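
As a sketch of how these shapes fit together (reusing the aws and quicksight imports assumed in the CustomSql sketch above), a dashboard source entity wraps a template ARN plus dataset references that map a placeholder name to a dataset ARN:

sourceEntity := &quicksight.DashboardSourceEntity{
	SourceTemplate: &quicksight.DashboardSourceTemplate{
		Arn: aws.String("arn:aws:quicksight:us-east-1:111122223333:template/example-template"), // hypothetical ARN
		DataSetReferences: []quicksight.DataSetReference{
			{
				DataSetPlaceholder: aws.String("sales"),
				DataSetArn:         aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/example-dataset"), // hypothetical ARN
			},
		},
	},
}
// Validate recurses into SourceTemplate and each DataSetReference.
if err := sourceEntity.Validate(); err != nil {
	// handle a missing Arn, an empty DataSetReferences list, etc.
}
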
+func (s DashboardSourceTemplate) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetReferences != nil { + v := s.DataSetReferences + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSetReferences", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// Dashboard summary. +type DashboardSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // Dashboard ID. + DashboardId *string `min:"1" type:"string"` + + // The last time this was published. + LastPublishedTime *time.Time `type:"timestamp"` + + // The last time this was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // Published version number. + PublishedVersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DashboardSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DashboardId != nil { + v := *s.DashboardId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.LastPublishedTime != nil { + v := *s.LastPublishedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastPublishedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PublishedVersionNumber != nil { + v := *s.PublishedVersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PublishedVersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +// Dashboard version. +type DashboardVersion struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // Description. + Description *string `min:"1" type:"string"` + + // Errors. + Errors []DashboardError `min:"1" type:"list"` + + // Source entity ARN. + SourceEntityArn *string `type:"string"` + + // The http status of the request. 
+ Status ResourceStatus `type:"string" enum:"true"` + + // Version number. + VersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardVersion) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DashboardVersion) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Errors != nil { + v := s.Errors + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.SourceEntityArn != nil { + v := *s.SourceEntityArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceEntityArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +// Dashboard version summary. +type DashboardVersionSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // Description. + Description *string `min:"1" type:"string"` + + // Source entity ARN. + SourceEntityArn *string `type:"string"` + + // The http status of the request. + Status ResourceStatus `type:"string" enum:"true"` + + // Version number. + VersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardVersionSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DashboardVersionSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SourceEntityArn != nil { + v := *s.SourceEntityArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SourceEntityArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +// Dataset. +type DataSet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + Arn *string `type:"string"` + + // Groupings of columns that work together in certain QuickSight features. Currently + // only geospatial hierarchy is supported. + ColumnGroups []ColumnGroup `min:"1" type:"list"` + + // The amount of SPICE capacity used by this dataset. This is 0 if the dataset + // isn't imported into SPICE. + ConsumedSpiceCapacityInBytes *int64 `type:"long"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the dataset. + DataSetId *string `type:"string"` + + // Indicates whether or not you want to import the data into SPICE. + ImportMode DataSetImportMode `type:"string" enum:"true"` + + // The last time this was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]LogicalTable `min:"1" type:"map"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // The list of columns after all transforms. These columns are available in + // templates, analyses, and dashboards. + OutputColumns []OutputColumn `type:"list"` + + // Declares the physical tables that are available in the underlying data sources. + PhysicalTableMap map[string]PhysicalTable `min:"1" type:"map"` + + // Row-level security configuration on the dataset. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` +} + +// String returns the string representation +func (s DataSet) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DataSet) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ColumnGroups != nil { + v := s.ColumnGroups + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnGroups", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.ConsumedSpiceCapacityInBytes != nil { + v := *s.ConsumedSpiceCapacityInBytes + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ConsumedSpiceCapacityInBytes", protocol.Int64Value(v), metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.ImportMode) > 0 { + v := s.ImportMode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ImportMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LogicalTableMap != nil { + v := s.LogicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "LogicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.OutputColumns != nil { + v := s.OutputColumns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "OutputColumns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.PhysicalTableMap != nil { + v := s.PhysicalTableMap + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "PhysicalTableMap", metadata) + ms0.Start() + for k1, v1 := range v { + ms0.MapSetFields(k1, v1) + } + ms0.End() + + } + if s.RowLevelPermissionDataSet != nil { + v := s.RowLevelPermissionDataSet + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RowLevelPermissionDataSet", v, metadata) + } + return nil +} + +// Dataset configuration. +type DataSetConfiguration struct { + _ struct{} `type:"structure"` + + // A structure containing the list of column group schemas. + ColumnGroupSchemaList []ColumnGroupSchema `type:"list"` + + // Dataset schema. + DataSetSchema *DataSetSchema `type:"structure"` + + // Placeholder. + Placeholder *string `type:"string"` +} + +// String returns the string representation +func (s DataSetConfiguration) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DataSetConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnGroupSchemaList != nil { + v := s.ColumnGroupSchemaList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnGroupSchemaList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DataSetSchema != nil { + v := s.DataSetSchema + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSetSchema", v, metadata) + } + if s.Placeholder != nil { + v := *s.Placeholder + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Placeholder", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Dataset reference. +type DataSetReference struct { + _ struct{} `type:"structure"` + + // Dataset ARN. + // + // DataSetArn is a required field + DataSetArn *string `type:"string" required:"true"` + + // Dataset placeholder. + // + // DataSetPlaceholder is a required field + DataSetPlaceholder *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DataSetReference) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSetReference) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DataSetReference"} + + if s.DataSetArn == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetArn")) + } + + if s.DataSetPlaceholder == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetPlaceholder")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSetReference) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSetArn != nil { + v := *s.DataSetArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetPlaceholder != nil { + v := *s.DataSetPlaceholder + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetPlaceholder", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Dataset schema. +type DataSetSchema struct { + _ struct{} `type:"structure"` + + // A structure containing the list of column schemas. + ColumnSchemaList []ColumnSchema `type:"list"` +} + +// String returns the string representation +func (s DataSetSchema) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSetSchema) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnSchemaList != nil { + v := s.ColumnSchemaList + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ColumnSchemaList", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// Dataset summary. +type DataSetSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the dataset. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the dataset. + DataSetId *string `type:"string"` + + // Indicates whether or not you want to import the data into SPICE. + ImportMode DataSetImportMode `type:"string" enum:"true"` + + // The last time this was updated. 
+ LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // Row-level security configuration on the dataset. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` +} + +// String returns the string representation +func (s DataSetSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSetSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DataSetId != nil { + v := *s.DataSetId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSetId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.ImportMode) > 0 { + v := s.ImportMode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ImportMode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RowLevelPermissionDataSet != nil { + v := s.RowLevelPermissionDataSet + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RowLevelPermissionDataSet", v, metadata) + } + return nil +} + +// The structure of a data source. +type DataSource struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the data source. + Arn *string `type:"string"` + + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the data source. This is unique per AWS Region per AWS account. + DataSourceId *string `type:"string"` + + // The parameters QuickSight uses to connect to your underlying source. This + // is a variant type structure. At most one of the attributes should be non-null + // for this structure to be valid. + DataSourceParameters *DataSourceParameters `type:"structure"` + + // Error information from the last update or the creation of the data source. + ErrorInfo *DataSourceErrorInfo `type:"structure"` + + // The last time this was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the data source. + Name *string `min:"1" type:"string"` + + // SSL properties that apply when QuickSight connects to your underlying source. + SslProperties *SslProperties `type:"structure"` + + // The http status of the request. + Status ResourceStatus `type:"string" enum:"true"` + + // The type of the data source. This indicates which database engine the data + // source connects to. + Type DataSourceType `type:"string" enum:"true"` + + // The VPC connection information. You need to use this parameter only when + // you want QuickSight to use a VPC connection when connecting to your underlying + // source. 
+ VpcConnectionProperties *VpcConnectionProperties `type:"structure"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSource) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DataSourceId != nil { + v := *s.DataSourceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSourceParameters != nil { + v := s.DataSourceParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DataSourceParameters", v, metadata) + } + if s.ErrorInfo != nil { + v := s.ErrorInfo + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ErrorInfo", v, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SslProperties != nil { + v := s.SslProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SslProperties", v, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VpcConnectionProperties != nil { + v := s.VpcConnectionProperties + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VpcConnectionProperties", v, metadata) + } + return nil +} + +// Data source credentials. +type DataSourceCredentials struct { + _ struct{} `type:"structure" sensitive:"true"` + + // Credential pair. + CredentialPair *CredentialPair `type:"structure"` +} + +// String returns the string representation +func (s DataSourceCredentials) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSourceCredentials) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DataSourceCredentials"} + if s.CredentialPair != nil { + if err := s.CredentialPair.Validate(); err != nil { + invalidParams.AddNested("CredentialPair", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
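
A small sketch (same assumed imports as above) of supplying data source credentials; note that the DataSourceCredentials shape is tagged sensitive:"true":

creds := &quicksight.DataSourceCredentials{
	CredentialPair: &quicksight.CredentialPair{
		Username: aws.String("analytics_user"),       // illustrative values only;
		Password: aws.String("example-password-123"), // never hard-code real credentials
	},
}
// Validate checks the nested CredentialPair's required fields and minimum lengths.
if err := creds.Validate(); err != nil {
	// handle a missing or empty username/password
}
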
+func (s DataSourceCredentials) MarshalFields(e protocol.FieldEncoder) error { + if s.CredentialPair != nil { + v := s.CredentialPair + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CredentialPair", v, metadata) + } + return nil +} + +// Error information on data source creation or update. +type DataSourceErrorInfo struct { + _ struct{} `type:"structure"` + + // Error message. + Message *string `type:"string"` + + // Error type. + Type DataSourceErrorInfoType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DataSourceErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSourceErrorInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The parameters QuickSight uses to connect to your underlying source. This +// is a variant type structure. At most one of the attributes should be non-null +// for this structure to be valid. +type DataSourceParameters struct { + _ struct{} `type:"structure"` + + // Amazon Elasticsearch parameters. + AmazonElasticsearchParameters *AmazonElasticsearchParameters `type:"structure"` + + // Athena parameters. + AthenaParameters *AthenaParameters `type:"structure"` + + // Aurora MySQL parameters. + AuroraParameters *AuroraParameters `type:"structure"` + + // Aurora PostgreSQL parameters. + AuroraPostgreSqlParameters *AuroraPostgreSqlParameters `type:"structure"` + + // AWS IoT Analytics parameters. + AwsIotAnalyticsParameters *AwsIotAnalyticsParameters `type:"structure"` + + // Jira parameters. + JiraParameters *JiraParameters `type:"structure"` + + // MariaDB parameters. + MariaDbParameters *MariaDbParameters `type:"structure"` + + // MySQL parameters. + MySqlParameters *MySqlParameters `type:"structure"` + + // PostgreSQL parameters. + PostgreSqlParameters *PostgreSqlParameters `type:"structure"` + + // Presto parameters. + PrestoParameters *PrestoParameters `type:"structure"` + + // RDS parameters. + RdsParameters *RdsParameters `type:"structure"` + + // Redshift parameters. + RedshiftParameters *RedshiftParameters `type:"structure"` + + // S3 parameters. + S3Parameters *S3Parameters `type:"structure"` + + // ServiceNow parameters. + ServiceNowParameters *ServiceNowParameters `type:"structure"` + + // Snowflake parameters. + SnowflakeParameters *SnowflakeParameters `type:"structure"` + + // Spark parameters. + SparkParameters *SparkParameters `type:"structure"` + + // SQL Server parameters. + SqlServerParameters *SqlServerParameters `type:"structure"` + + // Teradata parameters. + TeradataParameters *TeradataParameters `type:"structure"` + + // Twitter parameters. + TwitterParameters *TwitterParameters `type:"structure"` +} + +// String returns the string representation +func (s DataSourceParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DataSourceParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DataSourceParameters"} + if s.AmazonElasticsearchParameters != nil { + if err := s.AmazonElasticsearchParameters.Validate(); err != nil { + invalidParams.AddNested("AmazonElasticsearchParameters", err.(aws.ErrInvalidParams)) + } + } + if s.AthenaParameters != nil { + if err := s.AthenaParameters.Validate(); err != nil { + invalidParams.AddNested("AthenaParameters", err.(aws.ErrInvalidParams)) + } + } + if s.AuroraParameters != nil { + if err := s.AuroraParameters.Validate(); err != nil { + invalidParams.AddNested("AuroraParameters", err.(aws.ErrInvalidParams)) + } + } + if s.AuroraPostgreSqlParameters != nil { + if err := s.AuroraPostgreSqlParameters.Validate(); err != nil { + invalidParams.AddNested("AuroraPostgreSqlParameters", err.(aws.ErrInvalidParams)) + } + } + if s.AwsIotAnalyticsParameters != nil { + if err := s.AwsIotAnalyticsParameters.Validate(); err != nil { + invalidParams.AddNested("AwsIotAnalyticsParameters", err.(aws.ErrInvalidParams)) + } + } + if s.JiraParameters != nil { + if err := s.JiraParameters.Validate(); err != nil { + invalidParams.AddNested("JiraParameters", err.(aws.ErrInvalidParams)) + } + } + if s.MariaDbParameters != nil { + if err := s.MariaDbParameters.Validate(); err != nil { + invalidParams.AddNested("MariaDbParameters", err.(aws.ErrInvalidParams)) + } + } + if s.MySqlParameters != nil { + if err := s.MySqlParameters.Validate(); err != nil { + invalidParams.AddNested("MySqlParameters", err.(aws.ErrInvalidParams)) + } + } + if s.PostgreSqlParameters != nil { + if err := s.PostgreSqlParameters.Validate(); err != nil { + invalidParams.AddNested("PostgreSqlParameters", err.(aws.ErrInvalidParams)) + } + } + if s.PrestoParameters != nil { + if err := s.PrestoParameters.Validate(); err != nil { + invalidParams.AddNested("PrestoParameters", err.(aws.ErrInvalidParams)) + } + } + if s.RdsParameters != nil { + if err := s.RdsParameters.Validate(); err != nil { + invalidParams.AddNested("RdsParameters", err.(aws.ErrInvalidParams)) + } + } + if s.RedshiftParameters != nil { + if err := s.RedshiftParameters.Validate(); err != nil { + invalidParams.AddNested("RedshiftParameters", err.(aws.ErrInvalidParams)) + } + } + if s.S3Parameters != nil { + if err := s.S3Parameters.Validate(); err != nil { + invalidParams.AddNested("S3Parameters", err.(aws.ErrInvalidParams)) + } + } + if s.ServiceNowParameters != nil { + if err := s.ServiceNowParameters.Validate(); err != nil { + invalidParams.AddNested("ServiceNowParameters", err.(aws.ErrInvalidParams)) + } + } + if s.SnowflakeParameters != nil { + if err := s.SnowflakeParameters.Validate(); err != nil { + invalidParams.AddNested("SnowflakeParameters", err.(aws.ErrInvalidParams)) + } + } + if s.SparkParameters != nil { + if err := s.SparkParameters.Validate(); err != nil { + invalidParams.AddNested("SparkParameters", err.(aws.ErrInvalidParams)) + } + } + if s.SqlServerParameters != nil { + if err := s.SqlServerParameters.Validate(); err != nil { + invalidParams.AddNested("SqlServerParameters", err.(aws.ErrInvalidParams)) + } + } + if s.TeradataParameters != nil { + if err := s.TeradataParameters.Validate(); err != nil { + invalidParams.AddNested("TeradataParameters", err.(aws.ErrInvalidParams)) + } + } + if s.TwitterParameters != nil { + if err := s.TwitterParameters.Validate(); err != nil { + invalidParams.AddNested("TwitterParameters", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + 
} + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DataSourceParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.AmazonElasticsearchParameters != nil { + v := s.AmazonElasticsearchParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AmazonElasticsearchParameters", v, metadata) + } + if s.AthenaParameters != nil { + v := s.AthenaParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AthenaParameters", v, metadata) + } + if s.AuroraParameters != nil { + v := s.AuroraParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AuroraParameters", v, metadata) + } + if s.AuroraPostgreSqlParameters != nil { + v := s.AuroraPostgreSqlParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AuroraPostgreSqlParameters", v, metadata) + } + if s.AwsIotAnalyticsParameters != nil { + v := s.AwsIotAnalyticsParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "AwsIotAnalyticsParameters", v, metadata) + } + if s.JiraParameters != nil { + v := s.JiraParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "JiraParameters", v, metadata) + } + if s.MariaDbParameters != nil { + v := s.MariaDbParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MariaDbParameters", v, metadata) + } + if s.MySqlParameters != nil { + v := s.MySqlParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MySqlParameters", v, metadata) + } + if s.PostgreSqlParameters != nil { + v := s.PostgreSqlParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "PostgreSqlParameters", v, metadata) + } + if s.PrestoParameters != nil { + v := s.PrestoParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "PrestoParameters", v, metadata) + } + if s.RdsParameters != nil { + v := s.RdsParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RdsParameters", v, metadata) + } + if s.RedshiftParameters != nil { + v := s.RedshiftParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RedshiftParameters", v, metadata) + } + if s.S3Parameters != nil { + v := s.S3Parameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "S3Parameters", v, metadata) + } + if s.ServiceNowParameters != nil { + v := s.ServiceNowParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ServiceNowParameters", v, metadata) + } + if s.SnowflakeParameters != nil { + v := s.SnowflakeParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SnowflakeParameters", v, metadata) + } + if s.SparkParameters != nil { + v := s.SparkParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SparkParameters", v, metadata) + } + if s.SqlServerParameters != nil { + v := s.SqlServerParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SqlServerParameters", v, metadata) + } + if s.TeradataParameters != nil { + v := s.TeradataParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TeradataParameters", v, metadata) + } + if s.TwitterParameters != nil { + v := s.TwitterParameters + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TwitterParameters", v, metadata) + } + return nil +} + +// Date time parameter. 
+type DateTimeParameter struct { + _ struct{} `type:"structure"` + + // A display name for the dataset. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Values. + // + // Values is a required field + Values []time.Time `type:"list" required:"true"` +} + +// String returns the string representation +func (s DateTimeParameter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DateTimeParameter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DateTimeParameter"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.Values == nil { + invalidParams.Add(aws.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DateTimeParameter) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Values != nil { + v := s.Values + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Values", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.TimeValue{V: v1}) + } + ls0.End() + + } + return nil +} + +// Decimal parameter. +type DecimalParameter struct { + _ struct{} `type:"structure"` + + // A display name for the dataset. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Values. + // + // Values is a required field + Values []float64 `type:"list" required:"true"` +} + +// String returns the string representation +func (s DecimalParameter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecimalParameter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DecimalParameter"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.Values == nil { + invalidParams.Add(aws.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DecimalParameter) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Values != nil { + v := s.Values + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Values", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.Float64Value(v1)) + } + ls0.End() + + } + return nil +} + +// Error information on a data set SPICE ingestion. +type ErrorInfo struct { + _ struct{} `type:"structure"` + + // Error essage. + Message *string `type:"string"` + + // Error type. + Type IngestionErrorType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
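
Because DataSourceParameters is a variant shape, a sketch of the intended usage (same assumed imports as above) is to populate a single member; the generated Validate above only checks whichever nested members are set, so the at-most-one rule is presumably enforced by the service:

// An Athena-backed data source sets only AthenaParameters and leaves the rest nil.
params := &quicksight.DataSourceParameters{
	AthenaParameters: &quicksight.AthenaParameters{
		// Athena-specific settings would go here.
	},
}
// Setting a second member alongside AthenaParameters would still pass this
// client-side Validate, but would violate the variant contract described above.
if err := params.Validate(); err != nil {
	// handle invalid nested parameters
}
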
+func (s ErrorInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Export to CSV option. +type ExportToCSVOption struct { + _ struct{} `type:"structure"` + + // Availability status. + AvailabilityStatus DashboardBehavior `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ExportToCSVOption) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ExportToCSVOption) MarshalFields(e protocol.FieldEncoder) error { + if len(s.AvailabilityStatus) > 0 { + v := s.AvailabilityStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AvailabilityStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// A transform operation that filters rows based on some condition. +type FilterOperation struct { + _ struct{} `type:"structure"` + + // An expression that must evaluate to a boolean value. Rows for which the expression + // is evaluated to true are kept in the dataset. + // + // ConditionExpression is a required field + ConditionExpression *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s FilterOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "FilterOperation"} + + if s.ConditionExpression == nil { + invalidParams.Add(aws.NewErrParamRequired("ConditionExpression")) + } + if s.ConditionExpression != nil && len(*s.ConditionExpression) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ConditionExpression", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s FilterOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.ConditionExpression != nil { + v := *s.ConditionExpression + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ConditionExpression", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Geospatial column group that denotes a hierarchy. +type GeoSpatialColumnGroup struct { + _ struct{} `type:"structure"` + + // Columns in this hierarchy. + // + // Columns is a required field + Columns []string `min:"1" type:"list" required:"true"` + + // Country code. + // + // CountryCode is a required field + CountryCode GeoSpatialCountryCode `type:"string" required:"true" enum:"true"` + + // A display name for the hierarchy. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GeoSpatialColumnGroup) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
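
A one-line sketch of the filter transform (same assumed imports as above): rows for which the expression evaluates to true are retained.

filter := &quicksight.FilterOperation{
	ConditionExpression: aws.String("amount > 0"), // boolean expression; syntax is service-defined
}
if err := filter.Validate(); err != nil {
	// a nil or empty ConditionExpression fails the required/min-length checks above
}
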
+func (s *GeoSpatialColumnGroup) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GeoSpatialColumnGroup"} + + if s.Columns == nil { + invalidParams.Add(aws.NewErrParamRequired("Columns")) + } + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Columns", 1)) + } + if len(s.CountryCode) == 0 { + invalidParams.Add(aws.NewErrParamRequired("CountryCode")) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GeoSpatialColumnGroup) MarshalFields(e protocol.FieldEncoder) error { + if s.Columns != nil { + v := s.Columns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Columns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if len(s.CountryCode) > 0 { + v := s.CountryCode + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CountryCode", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A group in Amazon QuickSight consists of a set of users. You can use groups +// to make it easier to manage access and security. Currently, an Amazon QuickSight +// subscription can't contain more than 500 Amazon QuickSight groups. +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) for the group. + Arn *string `type:"string"` + + // The group description. + Description *string `min:"1" type:"string"` + + // The name of the group. + GroupName *string `min:"1" type:"string"` + + // The principal ID of the group. + PrincipalId *string `type:"string"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Group) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.GroupName != nil { + v := *s.GroupName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "GroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PrincipalId != nil { + v := *s.PrincipalId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PrincipalId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A member of an Amazon QuickSight group. Currently, group members must be +// users. Groups can't be members of another group. . +type GroupMember struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) for the group member (user). + Arn *string `type:"string"` + + // The name of the group member (user). 
+ MemberName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupMember) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GroupMember) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.MemberName != nil { + v := *s.MemberName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MemberName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// IAM policy assignment. +type IAMPolicyAssignment struct { + _ struct{} `type:"structure"` + + // Assignment ID. + AssignmentId *string `type:"string"` + + // Assignment name. + AssignmentName *string `min:"1" type:"string"` + + // Assignment status. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` + + // AWS account ID. + AwsAccountId *string `min:"12" type:"string"` + + // Identities. + Identities map[string][]string `type:"map"` + + // Policy ARN. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s IAMPolicyAssignment) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s IAMPolicyAssignment) MarshalFields(e protocol.FieldEncoder) error { + if s.AssignmentId != nil { + v := *s.AssignmentId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.AwsAccountId != nil { + v := *s.AwsAccountId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AwsAccountId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Identities != nil { + v := s.Identities + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "Identities", metadata) + ms0.Start() + for k1, v1 := range v { + ls1 := ms0.List(k1) + ls1.Start() + for _, v2 := range v1 { + ls1.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v2)}) + } + ls1.End() + } + ms0.End() + + } + if s.PolicyArn != nil { + v := *s.PolicyArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PolicyArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// IAM policy assignment Summary. +type IAMPolicyAssignmentSummary struct { + _ struct{} `type:"structure"` + + // Assignment name. + AssignmentName *string `min:"1" type:"string"` + + // Assignment status. + AssignmentStatus AssignmentStatus `type:"string" enum:"true"` +} + +// String returns the string representation +func (s IAMPolicyAssignmentSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
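
Looking back at GeoSpatialColumnGroup defined above, a sketch of a geographic hierarchy (same assumed imports as above; the country-code constant name is assumed):

geo := &quicksight.GeoSpatialColumnGroup{
	Name:        aws.String("Geography"),
	CountryCode: quicksight.GeoSpatialCountryCodeUs, // assumed constant for "US"
	Columns:     []string{"Country", "State", "City"},
}
// Validate enforces the required Name, CountryCode, and non-empty Columns list.
if err := geo.Validate(); err != nil {
	// handle an invalid hierarchy definition
}
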
+func (s IAMPolicyAssignmentSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.AssignmentName != nil { + v := *s.AssignmentName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.AssignmentStatus) > 0 { + v := s.AssignmentStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AssignmentStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Information on the SPICE ingestion for a dataset. +type Ingestion struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The time this ingestion started. + // + // CreatedTime is a required field + CreatedTime *time.Time `type:"timestamp" required:"true"` + + // Error information for this ingestion. + ErrorInfo *ErrorInfo `type:"structure"` + + // Ingestion ID. + IngestionId *string `min:"1" type:"string"` + + // Size of the data ingested in bytes. + IngestionSizeInBytes *int64 `type:"long"` + + // Ingestion status. + // + // IngestionStatus is a required field + IngestionStatus IngestionStatus `type:"string" required:"true" enum:"true"` + + // The time this ingestion took, measured in seconds. + IngestionTimeInSeconds *int64 `type:"long"` + + // Information on queued dataset SPICE ingestion. + QueueInfo *QueueInfo `type:"structure"` + + // Event source for this ingestion. + RequestSource IngestionRequestSource `type:"string" enum:"true"` + + // Type of this ingestion. + RequestType IngestionRequestType `type:"string" enum:"true"` + + // Information on rows during a data set SPICE ingestion. + RowInfo *RowInfo `type:"structure"` +} + +// String returns the string representation +func (s Ingestion) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Ingestion) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.ErrorInfo != nil { + v := s.ErrorInfo + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ErrorInfo", v, metadata) + } + if s.IngestionId != nil { + v := *s.IngestionId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IngestionSizeInBytes != nil { + v := *s.IngestionSizeInBytes + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionSizeInBytes", protocol.Int64Value(v), metadata) + } + if len(s.IngestionStatus) > 0 { + v := s.IngestionStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.IngestionTimeInSeconds != nil { + v := *s.IngestionTimeInSeconds + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IngestionTimeInSeconds", protocol.Int64Value(v), metadata) + } + if s.QueueInfo != nil { + v := s.QueueInfo + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "QueueInfo", v, metadata) + } + if len(s.RequestSource) > 0 { + v := s.RequestSource + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestSource", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if len(s.RequestType) > 0 { + v := s.RequestType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RequestType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.RowInfo != nil { + v := s.RowInfo + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RowInfo", v, metadata) + } + return nil +} + +// Metadata on a column that is used as the input of a transform operation. +type InputColumn struct { + _ struct{} `type:"structure"` + + // The name of this column in the underlying data source. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The data type of the column. + // + // Type is a required field + Type InputColumnDataType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s InputColumn) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputColumn) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "InputColumn"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s InputColumn) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Integer parameter. +type IntegerParameter struct { + _ struct{} `type:"structure"` + + // A display name for the dataset. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Values. + // + // Values is a required field + Values []int64 `type:"list" required:"true"` +} + +// String returns the string representation +func (s IntegerParameter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntegerParameter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "IntegerParameter"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.Values == nil { + invalidParams.Add(aws.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s IntegerParameter) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Values != nil { + v := s.Values + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Values", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.Int64Value(v1)) + } + ls0.End() + + } + return nil +} + +// Jira parameters. +type JiraParameters struct { + _ struct{} `type:"structure"` + + // The base URL of the Jira site. + // + // SiteBaseUrl is a required field + SiteBaseUrl *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s JiraParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JiraParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "JiraParameters"} + + if s.SiteBaseUrl == nil { + invalidParams.Add(aws.NewErrParamRequired("SiteBaseUrl")) + } + if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SiteBaseUrl", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s JiraParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.SiteBaseUrl != nil { + v := *s.SiteBaseUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SiteBaseUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Join instruction. +type JoinInstruction struct { + _ struct{} `type:"structure"` + + // Left operand. + // + // LeftOperand is a required field + LeftOperand *string `min:"1" type:"string" required:"true"` + + // On Clause. + // + // OnClause is a required field + OnClause *string `min:"1" type:"string" required:"true"` + + // Right operand. 
+ // + // RightOperand is a required field + RightOperand *string `min:"1" type:"string" required:"true"` + + // Type. + // + // Type is a required field + Type JoinType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s JoinInstruction) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *JoinInstruction) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "JoinInstruction"} + + if s.LeftOperand == nil { + invalidParams.Add(aws.NewErrParamRequired("LeftOperand")) + } + if s.LeftOperand != nil && len(*s.LeftOperand) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("LeftOperand", 1)) + } + + if s.OnClause == nil { + invalidParams.Add(aws.NewErrParamRequired("OnClause")) + } + if s.OnClause != nil && len(*s.OnClause) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OnClause", 1)) + } + + if s.RightOperand == nil { + invalidParams.Add(aws.NewErrParamRequired("RightOperand")) + } + if s.RightOperand != nil && len(*s.RightOperand) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RightOperand", 1)) + } + if len(s.Type) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s JoinInstruction) MarshalFields(e protocol.FieldEncoder) error { + if s.LeftOperand != nil { + v := *s.LeftOperand + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LeftOperand", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.OnClause != nil { + v := *s.OnClause + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "OnClause", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.RightOperand != nil { + v := *s.RightOperand + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RightOperand", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// A unit that joins and data transformations operate on. A logical table has +// a source, which can be either a physical table or result of a join. When +// it points to a physical table, a logical table acts as a mutable copy of +// that table through transform operations. +type LogicalTable struct { + _ struct{} `type:"structure"` + + // A display name for the logical table. + // + // Alias is a required field + Alias *string `min:"1" type:"string" required:"true"` + + // Transform operations that act on this logical table. + DataTransforms []TransformOperation `min:"1" type:"list"` + + // Source of this logical table. + // + // Source is a required field + Source *LogicalTableSource `type:"structure" required:"true"` +} + +// String returns the string representation +func (s LogicalTable) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LogicalTable) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LogicalTable"} + + if s.Alias == nil { + invalidParams.Add(aws.NewErrParamRequired("Alias")) + } + if s.Alias != nil && len(*s.Alias) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Alias", 1)) + } + if s.DataTransforms != nil && len(s.DataTransforms) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DataTransforms", 1)) + } + + if s.Source == nil { + invalidParams.Add(aws.NewErrParamRequired("Source")) + } + if s.DataTransforms != nil { + for i, v := range s.DataTransforms { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataTransforms", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s LogicalTable) MarshalFields(e protocol.FieldEncoder) error { + if s.Alias != nil { + v := *s.Alias + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Alias", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataTransforms != nil { + v := s.DataTransforms + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataTransforms", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Source != nil { + v := s.Source + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Source", v, metadata) + } + return nil +} + +// Information on the source of a logical table. This is a variant type structure. +// No more than one of the attributes should be non-null for this structure +// to be valid. +type LogicalTableSource struct { + _ struct{} `type:"structure"` + + // Specifies the result of a join of two logical tables. + JoinInstruction *JoinInstruction `type:"structure"` + + // Physical table ID. + PhysicalTableId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LogicalTableSource) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogicalTableSource) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "LogicalTableSource"} + if s.PhysicalTableId != nil && len(*s.PhysicalTableId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PhysicalTableId", 1)) + } + if s.JoinInstruction != nil { + if err := s.JoinInstruction.Validate(); err != nil { + invalidParams.AddNested("JoinInstruction", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s LogicalTableSource) MarshalFields(e protocol.FieldEncoder) error { + if s.JoinInstruction != nil { + v := s.JoinInstruction + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "JoinInstruction", v, metadata) + } + if s.PhysicalTableId != nil { + v := *s.PhysicalTableId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PhysicalTableId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Amazon S3 manifest file location. +type ManifestFileLocation struct { + _ struct{} `type:"structure"` + + // Amazon S3 bucket. 
+ // + // Bucket is a required field + Bucket *string `min:"1" type:"string" required:"true"` + + // Amazon S3 key that identifies an object. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ManifestFileLocation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ManifestFileLocation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ManifestFileLocation"} + + if s.Bucket == nil { + invalidParams.Add(aws.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Bucket", 1)) + } + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ManifestFileLocation) MarshalFields(e protocol.FieldEncoder) error { + if s.Bucket != nil { + v := *s.Bucket + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Bucket", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// MariaDB parameters. +type MariaDbParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s MariaDbParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MariaDbParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MariaDbParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
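
// Illustrative sketch (not generated code): builds a LogicalTable whose source is a
// join of two other tables, using the JoinInstruction and LogicalTableSource shapes
// above. The operand IDs and alias are placeholders, and "INNER" is assumed to be a
// valid JoinType enum string.
func exampleJoinedLogicalTable() (LogicalTable, error) {
	lt := LogicalTable{
		Alias: aws.String("orders_with_customers"), // placeholder display name
		Source: &LogicalTableSource{
			// Variant type: set either JoinInstruction or PhysicalTableId, not both.
			JoinInstruction: &JoinInstruction{
				LeftOperand:  aws.String("orders-table-id"),    // placeholder ID of the left table
				RightOperand: aws.String("customers-table-id"), // placeholder ID of the right table
				OnClause:     aws.String("CustomerId = Id"),    // placeholder join condition
				Type:         JoinType("INNER"),                // assumed enum value
			},
		},
	}
	if err := lt.Validate(); err != nil {
		return LogicalTable{}, err
	}
	return lt, nil
}
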
+func (s MariaDbParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// MySQL parameters. +type MySqlParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s MySqlParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MySqlParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MySqlParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MySqlParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// Output column. +type OutputColumn struct { + _ struct{} `type:"structure"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // Type. + Type ColumnDataType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s OutputColumn) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s OutputColumn) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Parameters. +type Parameters struct { + _ struct{} `type:"structure"` + + // DateTime parameters. + DateTimeParameters []DateTimeParameter `type:"list"` + + // Decimal parameters. + DecimalParameters []DecimalParameter `type:"list"` + + // Integer parameters. + IntegerParameters []IntegerParameter `type:"list"` + + // String parameters. + StringParameters []StringParameter `type:"list"` +} + +// String returns the string representation +func (s Parameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Parameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Parameters"} + if s.DateTimeParameters != nil { + for i, v := range s.DateTimeParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DateTimeParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.DecimalParameters != nil { + for i, v := range s.DecimalParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DecimalParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.IntegerParameters != nil { + for i, v := range s.IntegerParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IntegerParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.StringParameters != nil { + for i, v := range s.StringParameters { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StringParameters", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Parameters) MarshalFields(e protocol.FieldEncoder) error { + if s.DateTimeParameters != nil { + v := s.DateTimeParameters + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DateTimeParameters", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DecimalParameters != nil { + v := s.DecimalParameters + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DecimalParameters", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.IntegerParameters != nil { + v := s.IntegerParameters + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "IntegerParameters", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.StringParameters != nil { + v := s.StringParameters + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "StringParameters", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// A view of a data source. Contains information on the shape of the data in +// the underlying source. This is a variant type structure. No more than one +// of the attributes can be non-null for this structure to be valid. 
+type PhysicalTable struct { + _ struct{} `type:"structure"` + + // A physical table type built from the results of the custom SQL query. + CustomSql *CustomSql `type:"structure"` + + // A physical table type for relational data sources. + RelationalTable *RelationalTable `type:"structure"` + + // A physical table type for as S3 data source. + S3Source *S3Source `type:"structure"` +} + +// String returns the string representation +func (s PhysicalTable) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PhysicalTable) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PhysicalTable"} + if s.CustomSql != nil { + if err := s.CustomSql.Validate(); err != nil { + invalidParams.AddNested("CustomSql", err.(aws.ErrInvalidParams)) + } + } + if s.RelationalTable != nil { + if err := s.RelationalTable.Validate(); err != nil { + invalidParams.AddNested("RelationalTable", err.(aws.ErrInvalidParams)) + } + } + if s.S3Source != nil { + if err := s.S3Source.Validate(); err != nil { + invalidParams.AddNested("S3Source", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PhysicalTable) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomSql != nil { + v := s.CustomSql + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CustomSql", v, metadata) + } + if s.RelationalTable != nil { + v := s.RelationalTable + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RelationalTable", v, metadata) + } + if s.S3Source != nil { + v := s.S3Source + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "S3Source", v, metadata) + } + return nil +} + +// PostgreSQL parameters. +type PostgreSqlParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s PostgreSqlParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PostgreSqlParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PostgreSqlParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
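
// Illustrative sketch (not generated code): PhysicalTable is a variant type, so no
// more than one of CustomSql, RelationalTable, or S3Source should be non-nil. This
// builds the RelationalTable variant (defined later in this file); the ARN, names,
// and column type strings are placeholders/assumed enum values.
func examplePhysicalTable() PhysicalTable {
	return PhysicalTable{
		RelationalTable: &RelationalTable{
			DataSourceArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:datasource/example"), // placeholder ARN
			Name:          aws.String("orders"),
			Schema:        aws.String("public"),
			InputColumns: []InputColumn{
				{Name: aws.String("order_id"), Type: InputColumnDataType("STRING")}, // assumed enum value
				{Name: aws.String("amount"), Type: InputColumnDataType("DECIMAL")},  // assumed enum value
			},
		},
	}
}
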
+func (s PostgreSqlParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// Presto parameters. +type PrestoParameters struct { + _ struct{} `type:"structure"` + + // Catalog. + // + // Catalog is a required field + Catalog *string `type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s PrestoParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PrestoParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PrestoParameters"} + + if s.Catalog == nil { + invalidParams.Add(aws.NewErrParamRequired("Catalog")) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PrestoParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Catalog != nil { + v := *s.Catalog + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Catalog", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// A transform operation that projects columns. Operations that come after a +// projection can only refer to projected columns. +type ProjectOperation struct { + _ struct{} `type:"structure"` + + // Projected columns. + // + // ProjectedColumns is a required field + ProjectedColumns []string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ProjectOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ProjectOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ProjectOperation"} + + if s.ProjectedColumns == nil { + invalidParams.Add(aws.NewErrParamRequired("ProjectedColumns")) + } + if s.ProjectedColumns != nil && len(s.ProjectedColumns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ProjectedColumns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ProjectOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.ProjectedColumns != nil { + v := s.ProjectedColumns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ProjectedColumns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// Information on queued dataset SPICE ingestion. +type QueueInfo struct { + _ struct{} `type:"structure"` + + // The ID of the ongoing ingestion. The queued ingestion is waiting for the + // ongoing ingestion to complete. + // + // QueuedIngestion is a required field + QueuedIngestion *string `type:"string" required:"true"` + + // The ID of the queued ingestion. + // + // WaitingOnIngestion is a required field + WaitingOnIngestion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s QueueInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.QueuedIngestion != nil { + v := *s.QueuedIngestion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "QueuedIngestion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.WaitingOnIngestion != nil { + v := *s.WaitingOnIngestion + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "WaitingOnIngestion", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// RDS parameters. +type RdsParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Instance ID. + // + // InstanceId is a required field + InstanceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RdsParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RdsParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RdsParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.InstanceId == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceId")) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
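
// Illustrative sketch (not generated code): transform operations are applied in order,
// and operations after a ProjectOperation may only refer to projected columns. It
// assumes TransformOperation (defined elsewhere in this package) exposes
// ProjectOperation and RenameColumnOperation member fields; column names are placeholders.
func exampleDataTransforms() []TransformOperation {
	return []TransformOperation{
		{ProjectOperation: &ProjectOperation{ProjectedColumns: []string{"order_id", "customer_name"}}},
		// "customer_name" can be renamed here because it was projected above.
		{RenameColumnOperation: &RenameColumnOperation{
			ColumnName:    aws.String("customer_name"),
			NewColumnName: aws.String("customer"),
		}},
	}
}
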
+func (s RdsParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.InstanceId != nil { + v := *s.InstanceId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InstanceId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Redshift parameters. The ClusterId field can be blank if Host and Port are +// both set, and the other way around. +type RedshiftParameters struct { + _ struct{} `type:"structure"` + + // Cluster ID. This can be blank if the Host and Port are provided. + ClusterId *string `min:"1" type:"string"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. This can be blank if the ClusterId is provided. + Host *string `min:"1" type:"string"` + + // Port. This can be blank if the ClusterId is provided. + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s RedshiftParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RedshiftParameters"} + if s.ClusterId != nil && len(*s.ClusterId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClusterId", 1)) + } + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RedshiftParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.ClusterId != nil { + v := *s.ClusterId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ClusterId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// A physical table type for relational data sources. +type RelationalTable struct { + _ struct{} `type:"structure"` + + // Data source ARN. + // + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` + + // The column schema of the table. + // + // InputColumns is a required field + InputColumns []InputColumn `min:"1" type:"list" required:"true"` + + // Name of the relational table. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The schema name. Applies to certain relational database engines. 
+ Schema *string `type:"string"` +} + +// String returns the string representation +func (s RelationalTable) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RelationalTable) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RelationalTable"} + + if s.DataSourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceArn")) + } + + if s.InputColumns == nil { + invalidParams.Add(aws.NewErrParamRequired("InputColumns")) + } + if s.InputColumns != nil && len(s.InputColumns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InputColumns", 1)) + } + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + if s.InputColumns != nil { + for i, v := range s.InputColumns { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RelationalTable) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSourceArn != nil { + v := *s.DataSourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.InputColumns != nil { + v := s.InputColumns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "InputColumns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Schema != nil { + v := *s.Schema + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Schema", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A transform operation that renames a column. +type RenameColumnOperation struct { + _ struct{} `type:"structure"` + + // Name of the column to be renamed. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // New name for the column. + // + // NewColumnName is a required field + NewColumnName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RenameColumnOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RenameColumnOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RenameColumnOperation"} + + if s.ColumnName == nil { + invalidParams.Add(aws.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnName", 1)) + } + + if s.NewColumnName == nil { + invalidParams.Add(aws.NewErrParamRequired("NewColumnName")) + } + if s.NewColumnName != nil && len(*s.NewColumnName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewColumnName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
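
// Illustrative sketch (not generated code): per the RedshiftParameters documentation
// above, provide either a ClusterId or an explicit Host and Port; the remaining
// field(s) can be left unset. All literal values are placeholders.
var (
	exampleRedshiftByCluster = RedshiftParameters{
		ClusterId: aws.String("example-cluster"),
		Database:  aws.String("dev"),
	}
	exampleRedshiftByHost = RedshiftParameters{
		Host:     aws.String("example-cluster.abc123.us-east-1.redshift.amazonaws.com"), // placeholder endpoint
		Port:     aws.Int64(5439),
		Database: aws.String("dev"),
	}
)
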
+func (s RenameColumnOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnName != nil { + v := *s.ColumnName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.NewColumnName != nil { + v := *s.NewColumnName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NewColumnName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Permission for the resource. +type ResourcePermission struct { + _ struct{} `type:"structure"` + + // The action to grant or revoke permissions on. For example, "quicksight:DescribeDashboard". + // + // Actions is a required field + Actions []string `min:"1" type:"list" required:"true"` + + // The ARN of a QuickSight user or group, or an IAM ARN. If you are using cross-account + // resource sharing, this is the IAM ARN of an account root. Otherwise, it is + // the ARN of a QuickSight user or group. . + // + // Principal is a required field + Principal *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourcePermission) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourcePermission) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResourcePermission"} + + if s.Actions == nil { + invalidParams.Add(aws.NewErrParamRequired("Actions")) + } + if s.Actions != nil && len(s.Actions) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Actions", 1)) + } + + if s.Principal == nil { + invalidParams.Add(aws.NewErrParamRequired("Principal")) + } + if s.Principal != nil && len(*s.Principal) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Principal", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ResourcePermission) MarshalFields(e protocol.FieldEncoder) error { + if s.Actions != nil { + v := s.Actions + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Actions", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Principal != nil { + v := *s.Principal + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Principal", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information on rows during a data set SPICE ingestion. +type RowInfo struct { + _ struct{} `type:"structure"` + + // The number of rows that were not ingested. + RowsDropped *int64 `type:"long"` + + // The number of rows that were ingested. + RowsIngested *int64 `type:"long"` +} + +// String returns the string representation +func (s RowInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
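
// Illustrative sketch (not generated code): grants a principal the single action named
// in the ResourcePermission documentation above. The principal ARN is a placeholder.
func exampleDashboardReadPermission() ResourcePermission {
	return ResourcePermission{
		Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/example-user"), // placeholder ARN
		Actions:   []string{"quicksight:DescribeDashboard"},
	}
}
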
+func (s RowInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.RowsDropped != nil { + v := *s.RowsDropped + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RowsDropped", protocol.Int64Value(v), metadata) + } + if s.RowsIngested != nil { + v := *s.RowsIngested + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RowsIngested", protocol.Int64Value(v), metadata) + } + return nil +} + +// Row-level security configuration on the dataset. +type RowLevelPermissionDataSet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the permission dataset. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // Permission policy. + // + // PermissionPolicy is a required field + PermissionPolicy RowLevelPermissionPolicy `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s RowLevelPermissionDataSet) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RowLevelPermissionDataSet) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RowLevelPermissionDataSet"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + if len(s.PermissionPolicy) == 0 { + invalidParams.Add(aws.NewErrParamRequired("PermissionPolicy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RowLevelPermissionDataSet) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.PermissionPolicy) > 0 { + v := s.PermissionPolicy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PermissionPolicy", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// S3 parameters. +type S3Parameters struct { + _ struct{} `type:"structure"` + + // Location of the Amazon S3 manifest file. This is NULL if the manifest file + // was uploaded in the console. + // + // ManifestFileLocation is a required field + ManifestFileLocation *ManifestFileLocation `type:"structure" required:"true"` +} + +// String returns the string representation +func (s S3Parameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Parameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "S3Parameters"} + + if s.ManifestFileLocation == nil { + invalidParams.Add(aws.NewErrParamRequired("ManifestFileLocation")) + } + if s.ManifestFileLocation != nil { + if err := s.ManifestFileLocation.Validate(); err != nil { + invalidParams.AddNested("ManifestFileLocation", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s S3Parameters) MarshalFields(e protocol.FieldEncoder) error { + if s.ManifestFileLocation != nil { + v := s.ManifestFileLocation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ManifestFileLocation", v, metadata) + } + return nil +} + +// A physical table type for as S3 data source. +type S3Source struct { + _ struct{} `type:"structure"` + + // Data source ARN. 
+ // + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` + + // A physical table type for as S3 data source. + // + // InputColumns is a required field + InputColumns []InputColumn `min:"1" type:"list" required:"true"` + + // Information on the S3 source file(s) format. + UploadSettings *UploadSettings `type:"structure"` +} + +// String returns the string representation +func (s S3Source) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Source) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "S3Source"} + + if s.DataSourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSourceArn")) + } + + if s.InputColumns == nil { + invalidParams.Add(aws.NewErrParamRequired("InputColumns")) + } + if s.InputColumns != nil && len(s.InputColumns) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InputColumns", 1)) + } + if s.InputColumns != nil { + for i, v := range s.InputColumns { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.UploadSettings != nil { + if err := s.UploadSettings.Validate(); err != nil { + invalidParams.AddNested("UploadSettings", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s S3Source) MarshalFields(e protocol.FieldEncoder) error { + if s.DataSourceArn != nil { + v := *s.DataSourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DataSourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.InputColumns != nil { + v := s.InputColumns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "InputColumns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.UploadSettings != nil { + v := s.UploadSettings + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "UploadSettings", v, metadata) + } + return nil +} + +// ServiceNow parameters. +type ServiceNowParameters struct { + _ struct{} `type:"structure"` + + // URL of the base site. + // + // SiteBaseUrl is a required field + SiteBaseUrl *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ServiceNowParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServiceNowParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ServiceNowParameters"} + + if s.SiteBaseUrl == nil { + invalidParams.Add(aws.NewErrParamRequired("SiteBaseUrl")) + } + if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SiteBaseUrl", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ServiceNowParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.SiteBaseUrl != nil { + v := *s.SiteBaseUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SiteBaseUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Sheet controls option. 
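
// Illustrative sketch (not generated code): points an S3-backed data source at a
// manifest file, using the S3Parameters and ManifestFileLocation shapes above.
// Bucket and key are placeholders.
func exampleS3Parameters() (S3Parameters, error) {
	p := S3Parameters{
		ManifestFileLocation: &ManifestFileLocation{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String("manifests/sales.json"),
		},
	}
	if err := p.Validate(); err != nil {
		return S3Parameters{}, err
	}
	return p, nil
}
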
+type SheetControlsOption struct { + _ struct{} `type:"structure"` + + // Visibility state. + VisibilityState DashboardUIState `type:"string" enum:"true"` +} + +// String returns the string representation +func (s SheetControlsOption) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SheetControlsOption) MarshalFields(e protocol.FieldEncoder) error { + if len(s.VisibilityState) > 0 { + v := s.VisibilityState + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VisibilityState", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Snowflake parameters. +type SnowflakeParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Warehouse. + // + // Warehouse is a required field + Warehouse *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SnowflakeParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SnowflakeParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SnowflakeParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Warehouse == nil { + invalidParams.Add(aws.NewErrParamRequired("Warehouse")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SnowflakeParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Warehouse != nil { + v := *s.Warehouse + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Warehouse", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Spark parameters. +type SparkParameters struct { + _ struct{} `type:"structure"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s SparkParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SparkParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SparkParameters"} + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SparkParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// SQL Server parameters. +type SqlServerParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s SqlServerParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SqlServerParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SqlServerParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SqlServerParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// SSL properties that apply when QuickSight connects to your underlying data +// source. +type SslProperties struct { + _ struct{} `type:"structure"` + + // A boolean flag to control whether SSL should be disabled. 
+ DisableSsl *bool `type:"boolean"` +} + +// String returns the string representation +func (s SslProperties) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SslProperties) MarshalFields(e protocol.FieldEncoder) error { + if s.DisableSsl != nil { + v := *s.DisableSsl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DisableSsl", protocol.BoolValue(v), metadata) + } + return nil +} + +// String parameter. +type StringParameter struct { + _ struct{} `type:"structure"` + + // A display name for the dataset. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Values. + // + // Values is a required field + Values []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s StringParameter) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StringParameter) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "StringParameter"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.Values == nil { + invalidParams.Add(aws.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s StringParameter) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Values != nil { + v := s.Values + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Values", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// The keys of the key-value pairs for the resource tag or tags assigned to +// the resource. +type Tag struct { + _ struct{} `type:"structure"` + + // Tag key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Tag value. + // + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Tag"} + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Key", 1)) + } + + if s.Value == nil { + invalidParams.Add(aws.NewErrParamRequired("Value")) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Tag) MarshalFields(e protocol.FieldEncoder) error { + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Value", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A transform operation that tags a column with additional information. +type TagColumnOperation struct { + _ struct{} `type:"structure"` + + // The column that this operation acts on. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The dataset column tag, currently only used for geospatial type tagging. . + // + // This is not tags for the AWS tagging feature. . + // + // Tags is a required field + Tags []ColumnTag `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagColumnOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagColumnOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagColumnOperation"} + + if s.ColumnName == nil { + invalidParams.Add(aws.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ColumnName", 1)) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagColumnOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.ColumnName != nil { + v := *s.ColumnName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ColumnName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// A template object. A template is an entity in QuickSight which encapsulates +// the metadata required to create an analysis that can be used to create dashboard. +// It adds a layer of abstraction by replacing the dataset associated with the +// analysis with placeholders. Templates can be used to create dashboards by +// replacing dataset placeholders with datasets which follow the same schema +// that was used to create the source analysis and template. +// +// You can share templates across AWS accounts by allowing users in other AWS +// accounts to create a template or a dashboard from an existing template. +type Template struct { + _ struct{} `type:"structure"` + + // The ARN of the template. + Arn *string `type:"string"` + + // Time when this was created. + CreatedTime *time.Time `type:"timestamp"` + + // Time when this was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // The display name of the template. + Name *string `min:"1" type:"string"` + + // The ID for the template. This is unique per region per AWS account. + TemplateId *string `min:"1" type:"string"` + + // A structure describing the versions of the template. 
+ Version *TemplateVersion `type:"structure"` +} + +// String returns the string representation +func (s Template) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Template) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Version != nil { + v := s.Version + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Version", v, metadata) + } + return nil +} + +// The template alias. +type TemplateAlias struct { + _ struct{} `type:"structure"` + + // The display name of the template alias. + AliasName *string `min:"1" type:"string"` + + // The ARN of the template alias. + Arn *string `type:"string"` + + // The version number of the template alias. + TemplateVersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s TemplateAlias) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TemplateAlias) MarshalFields(e protocol.FieldEncoder) error { + if s.AliasName != nil { + v := *s.AliasName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AliasName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateVersionNumber != nil { + v := *s.TemplateVersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateVersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +// List of errors that occurred when the template version creation failed. +type TemplateError struct { + _ struct{} `type:"structure"` + + // Description of the error type. + Message *string `type:"string"` + + // Type of error. + Type TemplateErrorType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s TemplateError) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
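Template values such as the one defined above are returned by the service rather than constructed by callers. A hedged sketch of retrieving one with the request/send pattern used throughout this SDK; the DescribeTemplateInput member names (AwsAccountId, TemplateId) and the Template member on the response are assumptions based on the service model and are not shown in this diff:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := quicksight.New(cfg)

    req := svc.DescribeTemplateRequest(&quicksight.DescribeTemplateInput{
        AwsAccountId: aws.String("111122223333"),   // assumed member name
        TemplateId:   aws.String("sales-template"), // assumed member name
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // Every member of the returned shape is a pointer, so guard each read.
    if t := resp.Template; t != nil && t.Version != nil && t.Version.VersionNumber != nil {
        fmt.Printf("template version %d, status %s\n",
            *t.Version.VersionNumber, t.Version.Status)
    }
}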
+func (s TemplateError) MarshalFields(e protocol.FieldEncoder) error { + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Type) > 0 { + v := s.Type + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Type", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// The source analysis of the template. +type TemplateSourceAnalysis struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // A structure containing information about the dataset references used as placeholders + // in the template. + // + // DataSetReferences is a required field + DataSetReferences []DataSetReference `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TemplateSourceAnalysis) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TemplateSourceAnalysis) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TemplateSourceAnalysis"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + + if s.DataSetReferences == nil { + invalidParams.Add(aws.NewErrParamRequired("DataSetReferences")) + } + if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DataSetReferences", 1)) + } + if s.DataSetReferences != nil { + for i, v := range s.DataSetReferences { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TemplateSourceAnalysis) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DataSetReferences != nil { + v := s.DataSetReferences + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSetReferences", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// The source entity of the template. +type TemplateSourceEntity struct { + _ struct{} `type:"structure"` + + // The source analysis, if it is based on an analysis. + SourceAnalysis *TemplateSourceAnalysis `type:"structure"` + + // The source template, if it is based on an template. + SourceTemplate *TemplateSourceTemplate `type:"structure"` +} + +// String returns the string representation +func (s TemplateSourceEntity) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
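TemplateSourceEntity acts as a tagged union: exactly one of SourceAnalysis or SourceTemplate is expected to be set, and Validate recurses into whichever member is non-nil. A sketch of sourcing a template from an existing analysis; the DataSetReference member names (DataSetPlaceholder, DataSetArn) and the ARN strings are assumptions for illustration:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    // Source the new template from an existing analysis. Validate checks that
    // the required Arn and at least one DataSetReference are present.
    source := quicksight.TemplateSourceEntity{
        SourceAnalysis: &quicksight.TemplateSourceAnalysis{
            Arn: aws.String("arn:aws:quicksight:us-east-1:111122223333:analysis/example"),
            DataSetReferences: []quicksight.DataSetReference{
                {
                    // Assumed member names for illustration.
                    DataSetPlaceholder: aws.String("sales"),
                    DataSetArn:         aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/example"),
                },
            },
        },
    }
    if err := source.Validate(); err != nil {
        fmt.Println("invalid source entity:", err)
        return
    }
    fmt.Println("source entity is valid")
}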
+func (s *TemplateSourceEntity) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TemplateSourceEntity"} + if s.SourceAnalysis != nil { + if err := s.SourceAnalysis.Validate(); err != nil { + invalidParams.AddNested("SourceAnalysis", err.(aws.ErrInvalidParams)) + } + } + if s.SourceTemplate != nil { + if err := s.SourceTemplate.Validate(); err != nil { + invalidParams.AddNested("SourceTemplate", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TemplateSourceEntity) MarshalFields(e protocol.FieldEncoder) error { + if s.SourceAnalysis != nil { + v := s.SourceAnalysis + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceAnalysis", v, metadata) + } + if s.SourceTemplate != nil { + v := s.SourceTemplate + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SourceTemplate", v, metadata) + } + return nil +} + +// The source template of the template. +type TemplateSourceTemplate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TemplateSourceTemplate) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TemplateSourceTemplate) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TemplateSourceTemplate"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TemplateSourceTemplate) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The template summary. +type TemplateSummary struct { + _ struct{} `type:"structure"` + + // A summary of a template. + Arn *string `type:"string"` + + // The last time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // The last time this was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A structure containing a list of version numbers for the template summary. + LatestVersionNumber *int64 `min:"1" type:"long"` + + // A display name for the template. + Name *string `min:"1" type:"string"` + + // The ID of the template. This is unique per region per AWS account. + TemplateId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TemplateSummary) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TemplateSummary) MarshalFields(e protocol.FieldEncoder) error { + if s.Arn != nil { + v := *s.Arn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.CreatedTime != nil { + v := *s.CreatedTime -// A group in Amazon QuickSight consists of a set of users. You can use groups -// to make it easier to manage access and security. Currently, an Amazon QuickSight -// subscription can't contain more than 500 Amazon QuickSight groups. 
-type Group struct { + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LastUpdatedTime != nil { + v := *s.LastUpdatedTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastUpdatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.LatestVersionNumber != nil { + v := *s.LatestVersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LatestVersionNumber", protocol.Int64Value(v), metadata) + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateId != nil { + v := *s.TemplateId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A version of a template. +type TemplateVersion struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the group. - Arn *string `type:"string"` + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` - // The group description. + // Schema of the dataset identified by the placeholder. The idea is that any + // dashboard created from the template should be bound to new datasets matching + // the same schema described through this API. . + DataSetConfigurations []DataSetConfiguration `type:"list"` + + // The description of the template. Description *string `min:"1" type:"string"` - // The name of the group. - GroupName *string `min:"1" type:"string"` + // Errors associated with the template. + Errors []TemplateError `min:"1" type:"list"` - // The principal ID of the group. - PrincipalId *string `type:"string"` + // The ARN of the analysis or template which was used to create this template. + SourceEntityArn *string `type:"string"` + + // The http status of the request. + Status ResourceStatus `type:"string" enum:"true"` + + // The version number of the template. + VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s Group) String() string { +func (s TemplateVersion) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
-func (s Group) MarshalFields(e protocol.FieldEncoder) error { - if s.Arn != nil { - v := *s.Arn +func (s TemplateVersion) MarshalFields(e protocol.FieldEncoder) error { + if s.CreatedTime != nil { + v := *s.CreatedTime metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.DataSetConfigurations != nil { + v := s.DataSetConfigurations + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DataSetConfigurations", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + } if s.Description != nil { v := *s.Description @@ -49,51 +5479,425 @@ func (s Group) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } - if s.GroupName != nil { - v := *s.GroupName + if s.Errors != nil { + v := s.Errors metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "GroupName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + ls0 := e.List(protocol.BodyTarget, "Errors", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + } - if s.PrincipalId != nil { - v := *s.PrincipalId + if s.SourceEntityArn != nil { + v := *s.SourceEntityArn metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "PrincipalId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + e.SetValue(protocol.BodyTarget, "SourceEntityArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionNumber", protocol.Int64Value(v), metadata) } return nil } -// A member of an Amazon QuickSight group. Currently, group members must be -// users. Groups can't be members of another group. -type GroupMember struct { +// The template version. +type TemplateVersionSummary struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the group member (user). + // The ARN of the template version. Arn *string `type:"string"` - // The name of the group member (user). - MemberName *string `min:"1" type:"string"` + // The time this was created. + CreatedTime *time.Time `type:"timestamp"` + + // The desription of the template version. + Description *string `min:"1" type:"string"` + + // The status of the template version. + Status ResourceStatus `type:"string" enum:"true"` + + // The version number of the template version. + VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s GroupMember) String() string { +func (s TemplateVersionSummary) String() string { return awsutil.Prettify(s) } // MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
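When version creation fails, the Errors list on TemplateVersion carries the TemplateError values defined earlier, and Status reports the outcome. A small, illustrative helper for surfacing them; the sample message below is made up:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// reportVersion prints the status of a template version and any errors the
// service attached to it, guarding the pointer members.
func reportVersion(v quicksight.TemplateVersion) {
    fmt.Printf("status: %s\n", v.Status)
    for _, e := range v.Errors {
        msg := ""
        if e.Message != nil {
            msg = *e.Message
        }
        fmt.Printf("  error %s: %s\n", e.Type, msg)
    }
}

func main() {
    // Stand-in value; in practice this is part of a DescribeTemplate response.
    reportVersion(quicksight.TemplateVersion{
        Errors: []quicksight.TemplateError{
            {Message: aws.String("data set not found")},
        },
    })
}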
-func (s GroupMember) MarshalFields(e protocol.FieldEncoder) error { +func (s TemplateVersionSummary) MarshalFields(e protocol.FieldEncoder) error { if s.Arn != nil { v := *s.Arn metadata := protocol.Metadata{} e.SetValue(protocol.BodyTarget, "Arn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } - if s.MemberName != nil { - v := *s.MemberName + if s.CreatedTime != nil { + v := *s.CreatedTime metadata := protocol.Metadata{} - e.SetValue(protocol.BodyTarget, "MemberName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + e.SetValue(protocol.BodyTarget, "CreatedTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VersionNumber != nil { + v := *s.VersionNumber + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VersionNumber", protocol.Int64Value(v), metadata) + } + return nil +} + +// Teradata parameters. +type TeradataParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s TeradataParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TeradataParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TeradataParameters"} + + if s.Database == nil { + invalidParams.Add(aws.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Database", 1)) + } + + if s.Host == nil { + invalidParams.Add(aws.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Host", 1)) + } + + if s.Port == nil { + invalidParams.Add(aws.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TeradataParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.Database != nil { + v := *s.Database + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Database", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Host != nil { + v := *s.Host + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Host", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Port != nil { + v := *s.Port + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Port", protocol.Int64Value(v), metadata) + } + return nil +} + +// A data transformation on a logical table. This is a variant type structure. 
+// No more than one of the attributes should be non-null for this structure +// to be valid. +type TransformOperation struct { + _ struct{} `type:"structure"` + + // A transform operation that casts a column to a different type. + CastColumnTypeOperation *CastColumnTypeOperation `type:"structure"` + + // An operation that creates calculated columns. Columns created in one such + // operation form a lexical closure. + CreateColumnsOperation *CreateColumnsOperation `type:"structure"` + + // An operation that filters rows based on some condition. + FilterOperation *FilterOperation `type:"structure"` + + // An operation that projects columns. Operations that come after a projection + // can only refer to projected columns. + ProjectOperation *ProjectOperation `type:"structure"` + + // An operation that renames a column. + RenameColumnOperation *RenameColumnOperation `type:"structure"` + + // An operation that tags a column with additional information. + TagColumnOperation *TagColumnOperation `type:"structure"` +} + +// String returns the string representation +func (s TransformOperation) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransformOperation) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TransformOperation"} + if s.CastColumnTypeOperation != nil { + if err := s.CastColumnTypeOperation.Validate(); err != nil { + invalidParams.AddNested("CastColumnTypeOperation", err.(aws.ErrInvalidParams)) + } + } + if s.CreateColumnsOperation != nil { + if err := s.CreateColumnsOperation.Validate(); err != nil { + invalidParams.AddNested("CreateColumnsOperation", err.(aws.ErrInvalidParams)) + } + } + if s.FilterOperation != nil { + if err := s.FilterOperation.Validate(); err != nil { + invalidParams.AddNested("FilterOperation", err.(aws.ErrInvalidParams)) + } + } + if s.ProjectOperation != nil { + if err := s.ProjectOperation.Validate(); err != nil { + invalidParams.AddNested("ProjectOperation", err.(aws.ErrInvalidParams)) + } + } + if s.RenameColumnOperation != nil { + if err := s.RenameColumnOperation.Validate(); err != nil { + invalidParams.AddNested("RenameColumnOperation", err.(aws.ErrInvalidParams)) + } + } + if s.TagColumnOperation != nil { + if err := s.TagColumnOperation.Validate(); err != nil { + invalidParams.AddNested("TagColumnOperation", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
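As the comment on TransformOperation notes, this is a variant type: set exactly one member and Validate recurses into it. A sketch using the projection variant; the ProjectedColumns member name on ProjectOperation is an assumption based on the service model and is not shown in this diff:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    // Only one of the operation members is set, as required for a valid
    // variant value.
    op := quicksight.TransformOperation{
        ProjectOperation: &quicksight.ProjectOperation{
            // Assumed member name for illustration.
            ProjectedColumns: []string{"customer_id", "order_total"},
        },
    }
    if err := op.Validate(); err != nil {
        fmt.Println("invalid transform:", err)
        return
    }
    fmt.Println("transform is valid")
}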
+func (s TransformOperation) MarshalFields(e protocol.FieldEncoder) error { + if s.CastColumnTypeOperation != nil { + v := s.CastColumnTypeOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CastColumnTypeOperation", v, metadata) + } + if s.CreateColumnsOperation != nil { + v := s.CreateColumnsOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CreateColumnsOperation", v, metadata) + } + if s.FilterOperation != nil { + v := s.FilterOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "FilterOperation", v, metadata) + } + if s.ProjectOperation != nil { + v := s.ProjectOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ProjectOperation", v, metadata) + } + if s.RenameColumnOperation != nil { + v := s.RenameColumnOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "RenameColumnOperation", v, metadata) + } + if s.TagColumnOperation != nil { + v := s.TagColumnOperation + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TagColumnOperation", v, metadata) + } + return nil +} + +// Twitter parameters. +type TwitterParameters struct { + _ struct{} `type:"structure"` + + // Maximum number of rows to query Twitter. + // + // MaxRows is a required field + MaxRows *int64 `min:"1" type:"integer" required:"true"` + + // Twitter query string. + // + // Query is a required field + Query *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TwitterParameters) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TwitterParameters) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TwitterParameters"} + + if s.MaxRows == nil { + invalidParams.Add(aws.NewErrParamRequired("MaxRows")) + } + if s.MaxRows != nil && *s.MaxRows < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxRows", 1)) + } + + if s.Query == nil { + invalidParams.Add(aws.NewErrParamRequired("Query")) + } + if s.Query != nil && len(*s.Query) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Query", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TwitterParameters) MarshalFields(e protocol.FieldEncoder) error { + if s.MaxRows != nil { + v := *s.MaxRows + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MaxRows", protocol.Int64Value(v), metadata) + } + if s.Query != nil { + v := *s.Query + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Query", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information on source file(s) format. +type UploadSettings struct { + _ struct{} `type:"structure"` + + // Whether or not the file(s) has a header row. + ContainsHeader *bool `type:"boolean"` + + // The delimiter between values in the file. + Delimiter *string `min:"1" type:"string"` + + // File format. + Format FileFormat `type:"string" enum:"true"` + + // A row number to start reading data from. + StartFromRow *int64 `min:"1" type:"integer"` + + // Text qualifier. + TextQualifier TextQualifier `type:"string" enum:"true"` +} + +// String returns the string representation +func (s UploadSettings) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
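UploadSettings describes how a file-based source should be parsed, and the Validate method that follows enforces the modeled minimums (a non-empty delimiter and a start row of at least 1). A minimal sketch for a comma-separated file with a header row; the FileFormatCsv constant name is assumed, since the FileFormat enum values are not shown in this diff:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

func main() {
    settings := quicksight.UploadSettings{
        ContainsHeader: aws.Bool(true),
        Delimiter:      aws.String(","),
        Format:         quicksight.FileFormatCsv, // assumed enum constant
        StartFromRow:   aws.Int64(1),
    }
    if err := settings.Validate(); err != nil {
        fmt.Println("invalid upload settings:", err)
        return
    }
    fmt.Println("upload settings are valid")
}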
+func (s *UploadSettings) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UploadSettings"} + if s.Delimiter != nil && len(*s.Delimiter) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Delimiter", 1)) + } + if s.StartFromRow != nil && *s.StartFromRow < 1 { + invalidParams.Add(aws.NewErrParamMinValue("StartFromRow", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UploadSettings) MarshalFields(e protocol.FieldEncoder) error { + if s.ContainsHeader != nil { + v := *s.ContainsHeader + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ContainsHeader", protocol.BoolValue(v), metadata) + } + if s.Delimiter != nil { + v := *s.Delimiter + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Delimiter", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.Format) > 0 { + v := s.Format + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Format", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.StartFromRow != nil { + v := *s.StartFromRow + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "StartFromRow", protocol.Int64Value(v), metadata) + } + if len(s.TextQualifier) > 0 { + v := s.TextQualifier + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TextQualifier", protocol.QuotedValue{ValueMarshaler: v}, metadata) } return nil } @@ -105,10 +5909,10 @@ type User struct { // Active status of user. When you create an Amazon QuickSight user that’s // not an IAM user or an AD user, that user is inactive until they sign in and - // provide a password + // provide a password. Active *bool `type:"boolean"` - // The Amazon Resource Name (ARN) for the user. + // The Amazon Resource name (ARN) for the user. Arn *string `type:"string"` // The user's email address. @@ -120,7 +5924,20 @@ type User struct { // The principal ID of the user. PrincipalId *string `type:"string"` - // The Amazon QuickSight role for the user. + // The Amazon QuickSight role for the user. The user role can be one of the + // following:. + // + // * READER: A user who has read-only access to dashboards. + // + // * AUTHOR: A user who can create data sources, datasets, analyses, and + // dashboards. + // + // * ADMIN: A user who is an author, who can also manage Amazon QuickSight + // settings. + // + // * RESTRICTED_READER: This role isn't currently available for use. + // + // * RESTRICTED_AUTHOR: This role isn't currently available for use. Role UserRole `type:"string" enum:"true"` // The user's user name. @@ -178,3 +5995,43 @@ func (s User) MarshalFields(e protocol.FieldEncoder) error { } return nil } + +// VPC connection properties. +type VpcConnectionProperties struct { + _ struct{} `type:"structure"` + + // VPC connection ARN. + // + // VpcConnectionArn is a required field + VpcConnectionArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VpcConnectionProperties) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
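The Role values listed above are modeled as the UserRole enum. A hedged helper that maps them onto a simple capability check; the constant names (UserRoleAuthor, UserRoleAdmin, UserRoleReader) are assumed from the usual enum naming and are not shown in this diff:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/quicksight"
)

// canAuthor reports whether a role may create data sources, datasets, analyses,
// and dashboards, per the role descriptions above. Constant names are assumed.
func canAuthor(role quicksight.UserRole) bool {
    switch role {
    case quicksight.UserRoleAuthor, quicksight.UserRoleAdmin:
        return true
    default:
        return false
    }
}

func main() {
    fmt.Println(canAuthor(quicksight.UserRoleAuthor)) // true
    fmt.Println(canAuthor(quicksight.UserRoleReader)) // false
}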
+func (s *VpcConnectionProperties) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "VpcConnectionProperties"} + + if s.VpcConnectionArn == nil { + invalidParams.Add(aws.NewErrParamRequired("VpcConnectionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s VpcConnectionProperties) MarshalFields(e protocol.FieldEncoder) error { + if s.VpcConnectionArn != nil { + v := *s.VpcConnectionArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VpcConnectionArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} diff --git a/service/quicksight/quicksightiface/interface.go b/service/quicksight/quicksightiface/interface.go index 172cfc5fedb..523e4d8967f 100644 --- a/service/quicksight/quicksightiface/interface.go +++ b/service/quicksight/quicksightiface/interface.go @@ -23,7 +23,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon QuickSight. // func myFunc(svc quicksightiface.ClientAPI) bool { -// // Make svc.CreateGroup request +// // Make svc.CancelIngestion request // } // // func main() { @@ -43,7 +43,7 @@ import ( // type mockClientClient struct { // quicksightiface.ClientPI // } -// func (m *mockClientClient) CreateGroup(input *quicksight.CreateGroupInput) (*quicksight.CreateGroupOutput, error) { +// func (m *mockClientClient) CancelIngestion(input *quicksight.CancelIngestionInput) (*quicksight.CancelIngestionOutput, error) { // // mock response/functionality // } // @@ -61,36 +61,134 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
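Because the ClientAPI interface below now includes the new dashboard, data set, ingestion, template, and tagging operations, hand-written stubs need the same methods. One option, sketched here under the assumption that the concrete client type is quicksight.Client, is to embed the interface in the mock and keep a compile-time assertion next to it:

package main

import (
    "github.com/aws/aws-sdk-go-v2/service/quicksight"
    "github.com/aws/aws-sdk-go-v2/service/quicksight/quicksightiface"
)

// mockClient embeds the interface so it automatically picks up newly added
// methods; only the operations a test cares about need explicit overrides.
type mockClient struct {
    quicksightiface.ClientAPI
}

// Compile-time checks: the real client (assumed to be *quicksight.Client) and
// the mock both satisfy ClientAPI.
var (
    _ quicksightiface.ClientAPI = (*quicksight.Client)(nil)
    _ quicksightiface.ClientAPI = (*mockClient)(nil)
)

func main() {}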
type ClientAPI interface { + CancelIngestionRequest(*quicksight.CancelIngestionInput) quicksight.CancelIngestionRequest + + CreateDashboardRequest(*quicksight.CreateDashboardInput) quicksight.CreateDashboardRequest + + CreateDataSetRequest(*quicksight.CreateDataSetInput) quicksight.CreateDataSetRequest + + CreateDataSourceRequest(*quicksight.CreateDataSourceInput) quicksight.CreateDataSourceRequest + CreateGroupRequest(*quicksight.CreateGroupInput) quicksight.CreateGroupRequest CreateGroupMembershipRequest(*quicksight.CreateGroupMembershipInput) quicksight.CreateGroupMembershipRequest + CreateIAMPolicyAssignmentRequest(*quicksight.CreateIAMPolicyAssignmentInput) quicksight.CreateIAMPolicyAssignmentRequest + + CreateIngestionRequest(*quicksight.CreateIngestionInput) quicksight.CreateIngestionRequest + + CreateTemplateRequest(*quicksight.CreateTemplateInput) quicksight.CreateTemplateRequest + + CreateTemplateAliasRequest(*quicksight.CreateTemplateAliasInput) quicksight.CreateTemplateAliasRequest + + DeleteDashboardRequest(*quicksight.DeleteDashboardInput) quicksight.DeleteDashboardRequest + + DeleteDataSetRequest(*quicksight.DeleteDataSetInput) quicksight.DeleteDataSetRequest + + DeleteDataSourceRequest(*quicksight.DeleteDataSourceInput) quicksight.DeleteDataSourceRequest + DeleteGroupRequest(*quicksight.DeleteGroupInput) quicksight.DeleteGroupRequest DeleteGroupMembershipRequest(*quicksight.DeleteGroupMembershipInput) quicksight.DeleteGroupMembershipRequest + DeleteIAMPolicyAssignmentRequest(*quicksight.DeleteIAMPolicyAssignmentInput) quicksight.DeleteIAMPolicyAssignmentRequest + + DeleteTemplateRequest(*quicksight.DeleteTemplateInput) quicksight.DeleteTemplateRequest + + DeleteTemplateAliasRequest(*quicksight.DeleteTemplateAliasInput) quicksight.DeleteTemplateAliasRequest + DeleteUserRequest(*quicksight.DeleteUserInput) quicksight.DeleteUserRequest DeleteUserByPrincipalIdRequest(*quicksight.DeleteUserByPrincipalIdInput) quicksight.DeleteUserByPrincipalIdRequest + DescribeDashboardRequest(*quicksight.DescribeDashboardInput) quicksight.DescribeDashboardRequest + + DescribeDashboardPermissionsRequest(*quicksight.DescribeDashboardPermissionsInput) quicksight.DescribeDashboardPermissionsRequest + + DescribeDataSetRequest(*quicksight.DescribeDataSetInput) quicksight.DescribeDataSetRequest + + DescribeDataSetPermissionsRequest(*quicksight.DescribeDataSetPermissionsInput) quicksight.DescribeDataSetPermissionsRequest + + DescribeDataSourceRequest(*quicksight.DescribeDataSourceInput) quicksight.DescribeDataSourceRequest + + DescribeDataSourcePermissionsRequest(*quicksight.DescribeDataSourcePermissionsInput) quicksight.DescribeDataSourcePermissionsRequest + DescribeGroupRequest(*quicksight.DescribeGroupInput) quicksight.DescribeGroupRequest + DescribeIAMPolicyAssignmentRequest(*quicksight.DescribeIAMPolicyAssignmentInput) quicksight.DescribeIAMPolicyAssignmentRequest + + DescribeIngestionRequest(*quicksight.DescribeIngestionInput) quicksight.DescribeIngestionRequest + + DescribeTemplateRequest(*quicksight.DescribeTemplateInput) quicksight.DescribeTemplateRequest + + DescribeTemplateAliasRequest(*quicksight.DescribeTemplateAliasInput) quicksight.DescribeTemplateAliasRequest + + DescribeTemplatePermissionsRequest(*quicksight.DescribeTemplatePermissionsInput) quicksight.DescribeTemplatePermissionsRequest + DescribeUserRequest(*quicksight.DescribeUserInput) quicksight.DescribeUserRequest GetDashboardEmbedUrlRequest(*quicksight.GetDashboardEmbedUrlInput) quicksight.GetDashboardEmbedUrlRequest + 
ListDashboardVersionsRequest(*quicksight.ListDashboardVersionsInput) quicksight.ListDashboardVersionsRequest + + ListDashboardsRequest(*quicksight.ListDashboardsInput) quicksight.ListDashboardsRequest + + ListDataSetsRequest(*quicksight.ListDataSetsInput) quicksight.ListDataSetsRequest + + ListDataSourcesRequest(*quicksight.ListDataSourcesInput) quicksight.ListDataSourcesRequest + ListGroupMembershipsRequest(*quicksight.ListGroupMembershipsInput) quicksight.ListGroupMembershipsRequest ListGroupsRequest(*quicksight.ListGroupsInput) quicksight.ListGroupsRequest + ListIAMPolicyAssignmentsRequest(*quicksight.ListIAMPolicyAssignmentsInput) quicksight.ListIAMPolicyAssignmentsRequest + + ListIAMPolicyAssignmentsForUserRequest(*quicksight.ListIAMPolicyAssignmentsForUserInput) quicksight.ListIAMPolicyAssignmentsForUserRequest + + ListIngestionsRequest(*quicksight.ListIngestionsInput) quicksight.ListIngestionsRequest + + ListTagsForResourceRequest(*quicksight.ListTagsForResourceInput) quicksight.ListTagsForResourceRequest + + ListTemplateAliasesRequest(*quicksight.ListTemplateAliasesInput) quicksight.ListTemplateAliasesRequest + + ListTemplateVersionsRequest(*quicksight.ListTemplateVersionsInput) quicksight.ListTemplateVersionsRequest + + ListTemplatesRequest(*quicksight.ListTemplatesInput) quicksight.ListTemplatesRequest + ListUserGroupsRequest(*quicksight.ListUserGroupsInput) quicksight.ListUserGroupsRequest ListUsersRequest(*quicksight.ListUsersInput) quicksight.ListUsersRequest RegisterUserRequest(*quicksight.RegisterUserInput) quicksight.RegisterUserRequest + TagResourceRequest(*quicksight.TagResourceInput) quicksight.TagResourceRequest + + UntagResourceRequest(*quicksight.UntagResourceInput) quicksight.UntagResourceRequest + + UpdateDashboardRequest(*quicksight.UpdateDashboardInput) quicksight.UpdateDashboardRequest + + UpdateDashboardPermissionsRequest(*quicksight.UpdateDashboardPermissionsInput) quicksight.UpdateDashboardPermissionsRequest + + UpdateDashboardPublishedVersionRequest(*quicksight.UpdateDashboardPublishedVersionInput) quicksight.UpdateDashboardPublishedVersionRequest + + UpdateDataSetRequest(*quicksight.UpdateDataSetInput) quicksight.UpdateDataSetRequest + + UpdateDataSetPermissionsRequest(*quicksight.UpdateDataSetPermissionsInput) quicksight.UpdateDataSetPermissionsRequest + + UpdateDataSourceRequest(*quicksight.UpdateDataSourceInput) quicksight.UpdateDataSourceRequest + + UpdateDataSourcePermissionsRequest(*quicksight.UpdateDataSourcePermissionsInput) quicksight.UpdateDataSourcePermissionsRequest + UpdateGroupRequest(*quicksight.UpdateGroupInput) quicksight.UpdateGroupRequest + UpdateIAMPolicyAssignmentRequest(*quicksight.UpdateIAMPolicyAssignmentInput) quicksight.UpdateIAMPolicyAssignmentRequest + + UpdateTemplateRequest(*quicksight.UpdateTemplateInput) quicksight.UpdateTemplateRequest + + UpdateTemplateAliasRequest(*quicksight.UpdateTemplateAliasInput) quicksight.UpdateTemplateAliasRequest + + UpdateTemplatePermissionsRequest(*quicksight.UpdateTemplatePermissionsInput) quicksight.UpdateTemplatePermissionsRequest + UpdateUserRequest(*quicksight.UpdateUserInput) quicksight.UpdateUserRequest } diff --git a/service/rds/api_op_CreateDBInstance.go b/service/rds/api_op_CreateDBInstance.go index 3b09374adb0..f92785a69f2 100644 --- a/service/rds/api_op_CreateDBInstance.go +++ b/service/rds/api_op_CreateDBInstance.go @@ -264,6 +264,13 @@ type CreateDBInstanceInput struct { // The database can't be deleted when deletion protection is enabled. 
By default, // deletion protection is disabled. For more information, see Deleting a DB // Instance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html). + // + // Amazon Aurora + // + // Not applicable. You can enable or disable deletion protection for the DB + // cluster. For more information, see CreateDBCluster. DB instances in a DB + // cluster can be deleted even when deletion protection is enabled for the DB + // cluster. DeletionProtection *bool `type:"boolean"` // The Active Directory directory ID to create the DB instance in. Currently, diff --git a/service/rds/api_op_CreateEventSubscription.go b/service/rds/api_op_CreateEventSubscription.go index 7c12b3595b4..0d6f9c8ea49 100644 --- a/service/rds/api_op_CreateEventSubscription.go +++ b/service/rds/api_op_CreateEventSubscription.go @@ -133,6 +133,9 @@ const opCreateEventSubscription = "CreateEventSubscription" // the SourceIdentifier, you are notified of events generated from all RDS sources // belonging to your customer account. // +// RDS event notification is only available for unencrypted SNS topics. If you +// specify an encrypted SNS topic, event notifications aren't sent for the topic. +// // // Example sending a request using CreateEventSubscriptionRequest. // req := client.CreateEventSubscriptionRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go index 4666403332e..4f339a32f34 100644 --- a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go +++ b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go @@ -19,7 +19,11 @@ type ModifyCurrentDBClusterCapacityInput struct { // // Constraints: // - // * Value must be 1, 2, 4, 8, 16, 32, 64, 128, or 256. + // * For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, + // 128, and 256. + // + // * For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, + // 192, and 384. Capacity *int64 `type:"integer"` // The DB cluster identifier for the cluster being modified. This parameter diff --git a/service/rds/api_op_ModifyDBSnapshot.go b/service/rds/api_op_ModifyDBSnapshot.go index 575bd01426f..c6271c6540c 100644 --- a/service/rds/api_op_ModifyDBSnapshot.go +++ b/service/rds/api_op_ModifyDBSnapshot.go @@ -33,6 +33,11 @@ type ModifyDBSnapshotInput struct { // * 11.2.0.4.v12 (supported for 11.2.0.2 DB snapshots) // // * 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) + // + // PostgreSQL + // + // For the list of engine versions that are available for upgrading a DB snapshot, + // see Upgrading the PostgreSQL DB Engine for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.PostgreSQL.html#USER_UpgradeDBInstance.PostgreSQL.MajorVersion). EngineVersion *string `type:"string"` // The option group to identify with the upgraded DB snapshot. @@ -86,7 +91,7 @@ const opModifyDBSnapshot = "ModifyDBSnapshot" // Updates a manual DB snapshot, which can be encrypted or not encrypted, with // a new engine version. // -// Amazon RDS supports upgrading DB snapshots for MySQL and Oracle. +// Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL. // // // Example sending a request using ModifyDBSnapshotRequest. 
// req := client.ModifyDBSnapshotRequest(params) diff --git a/service/rds/api_types.go b/service/rds/api_types.go index 2aeb499cffa..41bd64eafe3 100644 --- a/service/rds/api_types.go +++ b/service/rds/api_types.go @@ -2852,14 +2852,22 @@ type ScalingConfiguration struct { // The maximum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. + // For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, + // and 256. + // + // For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, + // and 384. // // The maximum capacity must be greater than or equal to the minimum capacity. MaxCapacity *int64 `type:"integer"` // The minimum capacity for an Aurora DB cluster in serverless DB engine mode. // - // Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. + // For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, + // and 256. + // + // For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, + // and 384. // // The minimum capacity must be less than or equal to the maximum capacity. MinCapacity *int64 `type:"integer"` diff --git a/service/s3/api_enums.go b/service/s3/api_enums.go index 020b282ac70..f1f699867d3 100644 --- a/service/s3/api_enums.go +++ b/service/s3/api_enums.go @@ -177,17 +177,23 @@ type Event string // Enum values for Event const ( - EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" - EventS3ObjectCreated Event = "s3:ObjectCreated:*" - EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" - EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" - EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" - EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" - EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" - EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" - EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" - EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" - EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" + EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" + EventS3ObjectCreated Event = "s3:ObjectCreated:*" + EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" + EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" + EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" + EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" + EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" + EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" + EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" + EventS3ObjectRestore Event = "s3:ObjectRestore:*" + EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" + EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" + EventS3Replication Event = "s3:Replication:*" + EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication" + EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked" + EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold" + EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold" ) func (enum Event) MarshalValue() (string, error) { @@ -199,6 +205,23 @@ func (enum Event) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } 
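The Event enum above gains wildcard restore and replication values in this release. A short sketch that exercises the new constants together with the existing MarshalValue helper; the constant names are taken directly from the block above, while wiring them into a bucket notification configuration is left out:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    // Event values that could be attached to a bucket notification configuration.
    events := []s3.Event{
        s3.EventS3ObjectRestore, // new wildcard for restore events
        s3.EventS3Replication,   // new wildcard for replication events
        s3.EventS3ReplicationOperationMissedThreshold,
        s3.EventS3ReplicationOperationReplicatedAfterThreshold,
    }
    for _, ev := range events {
        // MarshalValue returns the wire value, e.g. "s3:Replication:*".
        v, _ := ev.MarshalValue()
        fmt.Println(v)
    }
}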
+type ExistingObjectReplicationStatus string + +// Enum values for ExistingObjectReplicationStatus +const ( + ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" + ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" +) + +func (enum ExistingObjectReplicationStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ExistingObjectReplicationStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ExpirationStatus string // Enum values for ExpirationStatus @@ -413,6 +436,23 @@ func (enum MetadataDirective) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type MetricsStatus string + +// Enum values for MetricsStatus +const ( + MetricsStatusEnabled MetricsStatus = "Enabled" + MetricsStatusDisabled MetricsStatus = "Disabled" +) + +func (enum MetricsStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MetricsStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ObjectCannedACL string // Enum values for ObjectCannedACL @@ -663,6 +703,23 @@ func (enum ReplicationStatus) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type ReplicationTimeStatus string + +// Enum values for ReplicationTimeStatus +const ( + ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" + ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" +) + +func (enum ReplicationTimeStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ReplicationTimeStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + // If present, indicates that the requester was successfully charged for the // request. type RequestCharged string diff --git a/service/s3/api_errors.go b/service/s3/api_errors.go index 931cb17bb05..4db90702493 100644 --- a/service/s3/api_errors.go +++ b/service/s3/api_errors.go @@ -13,6 +13,12 @@ const ( // ErrCodeBucketAlreadyOwnedByYou for service response error code // "BucketAlreadyOwnedByYou". + // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" // ErrCodeNoSuchBucket for service response error code diff --git a/service/s3/api_examples_test.go b/service/s3/api_examples_test.go index 61c94a8719f..d9cb4ce301d 100644 --- a/service/s3/api_examples_test.go +++ b/service/s3/api_examples_test.go @@ -147,9 +147,10 @@ func ExampleClient_CopyObjectRequest_shared00() { fmt.Println(result) } -// To create a bucket +// To create a bucket in a specific region // -// The following example creates a bucket. +// The following example creates a bucket. The request specifies an AWS region where +// to create the bucket. 
func ExampleClient_CreateBucketRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -159,6 +160,9 @@ func ExampleClient_CreateBucketRequest_shared00() { svc := s3.New(cfg) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: s3.BucketLocationConstraintEuWest1, + }, } req := svc.CreateBucketRequest(input) @@ -184,10 +188,9 @@ func ExampleClient_CreateBucketRequest_shared00() { fmt.Println(result) } -// To create a bucket in a specific region +// To create a bucket // -// The following example creates a bucket. The request specifies an AWS region where -// to create the bucket. +// The following example creates a bucket. func ExampleClient_CreateBucketRequest_shared01() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -197,9 +200,6 @@ func ExampleClient_CreateBucketRequest_shared01() { svc := s3.New(cfg) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ - LocationConstraint: s3.BucketLocationConstraintEuWest1, - }, } req := svc.CreateBucketRequest(input) @@ -630,11 +630,11 @@ func ExampleClient_DeleteObjectTaggingRequest_shared01() { fmt.Println(result) } -// To delete multiple objects from a versioned bucket +// To delete multiple object versions from a versioned bucket // -// The following example deletes objects from a bucket. The bucket is versioned, and -// the request does not specify the object version to delete. In this case, all versions -// remain in the bucket and S3 adds a delete marker. +// The following example deletes objects from a bucket. The request specifies object +// versions. S3 deletes specific object versions and returns the key and versions of +// deleted objects in the response. func ExampleClient_DeleteObjectsRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -647,10 +647,12 @@ func ExampleClient_DeleteObjectsRequest_shared00() { Delete: &s3.Delete{ Objects: []s3.ObjectIdentifier{ { - Key: aws.String("objectkey1"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), }, { - Key: aws.String("objectkey2"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), }, }, Quiet: aws.Bool(false), @@ -676,11 +678,11 @@ func ExampleClient_DeleteObjectsRequest_shared00() { fmt.Println(result) } -// To delete multiple object versions from a versioned bucket +// To delete multiple objects from a versioned bucket // -// The following example deletes objects from a bucket. The request specifies object -// versions. S3 deletes specific object versions and returns the key and versions of -// deleted objects in the response. +// The following example deletes objects from a bucket. The bucket is versioned, and +// the request does not specify the object version to delete. In this case, all versions +// remain in the bucket and S3 adds a delete marker. 
func ExampleClient_DeleteObjectsRequest_shared01() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -693,12 +695,10 @@ func ExampleClient_DeleteObjectsRequest_shared01() { Delete: &s3.Delete{ Objects: []s3.ObjectIdentifier{ { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), + Key: aws.String("objectkey1"), }, { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), + Key: aws.String("objectkey2"), }, }, Quiet: aws.Bool(false), @@ -1438,9 +1438,10 @@ func ExampleClient_ListBucketsRequest_shared00() { fmt.Println(result) } -// To list in-progress multipart uploads on a bucket +// List next set of multipart uploads when previous result is truncated // -// The following example lists in-progress multipart uploads on a specific bucket. +// The following example specifies the upload-id-marker and key-marker from previous +// truncated response to retrieve next setup of multipart uploads. func ExampleClient_ListMultipartUploadsRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -1449,7 +1450,10 @@ func ExampleClient_ListMultipartUploadsRequest_shared00() { svc := s3.New(cfg) input := &s3.ListMultipartUploadsInput{ - Bucket: aws.String("examplebucket"), + Bucket: aws.String("examplebucket"), + KeyMarker: aws.String("nextkeyfrompreviousresponse"), + MaxUploads: aws.Int64(2), + UploadIdMarker: aws.String("valuefrompreviousresponse"), } req := svc.ListMultipartUploadsRequest(input) @@ -1471,10 +1475,9 @@ func ExampleClient_ListMultipartUploadsRequest_shared00() { fmt.Println(result) } -// List next set of multipart uploads when previous result is truncated +// To list in-progress multipart uploads on a bucket // -// The following example specifies the upload-id-marker and key-marker from previous -// truncated response to retrieve next setup of multipart uploads. +// The following example lists in-progress multipart uploads on a specific bucket. func ExampleClient_ListMultipartUploadsRequest_shared01() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -1483,10 +1486,7 @@ func ExampleClient_ListMultipartUploadsRequest_shared01() { svc := s3.New(cfg) input := &s3.ListMultipartUploadsInput{ - Bucket: aws.String("examplebucket"), - KeyMarker: aws.String("nextkeyfrompreviousresponse"), - MaxUploads: aws.Int64(2), - UploadIdMarker: aws.String("valuefrompreviousresponse"), + Bucket: aws.String("examplebucket"), } req := svc.ListMultipartUploadsRequest(input) @@ -2133,11 +2133,10 @@ func ExampleClient_PutBucketWebsiteRequest_shared00() { fmt.Println(result) } -// To upload an object and specify canned ACL. +// To create an object. // -// The following example uploads and object. The request specifies optional canned ACL -// (access control list) to all READ access to authenticated users. If the bucket is -// versioning enabled, S3 returns version ID in response. +// The following example creates an object. If the bucket is versioning enabled, S3 +// returns version ID in response. 
func ExampleClient_PutObjectRequest_shared00() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2146,10 +2145,9 @@ func ExampleClient_PutObjectRequest_shared00() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - ACL: s3.ObjectCannedACLAuthenticatedRead, Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), + Key: aws.String("objectkey"), } req := svc.PutObjectRequest(input) @@ -2171,11 +2169,10 @@ func ExampleClient_PutObjectRequest_shared00() { fmt.Println(result) } -// To upload an object +// To upload an object and specify optional tags // -// The following example uploads an object to a versioning-enabled bucket. The source -// file is specified using Windows file syntax. S3 returns VersionId of the newly created -// object. +// The following example uploads an object. The request specifies optional object tags. +// The bucket is versioned, therefore S3 returns version ID of the newly created object. func ExampleClient_PutObjectRequest_shared01() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2184,9 +2181,10 @@ func ExampleClient_PutObjectRequest_shared01() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + Tagging: aws.String("key1=value1&key2=value2"), } req := svc.PutObjectRequest(input) @@ -2208,10 +2206,11 @@ func ExampleClient_PutObjectRequest_shared01() { fmt.Println(result) } -// To create an object. +// To upload an object and specify server-side encryption and object tags // -// The following example creates an object. If the bucket is versioning enabled, S3 -// returns version ID in response. +// The following example uploads and object. The request specifies the optional server-side +// encryption option. The request also specifies optional object tags. If the bucket +// is versioning enabled, S3 returns version ID in response. func ExampleClient_PutObjectRequest_shared02() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2220,9 +2219,11 @@ func ExampleClient_PutObjectRequest_shared02() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + ServerSideEncryption: s3.ServerSideEncryptionAes256, + Tagging: aws.String("key1=value1&key2=value2"), } req := svc.PutObjectRequest(input) @@ -2244,10 +2245,11 @@ func ExampleClient_PutObjectRequest_shared02() { fmt.Println(result) } -// To upload object and specify user-defined metadata +// To upload an object // -// The following example creates an object. The request also specifies optional metadata. -// If the bucket is versioning enabled, S3 returns version ID in response. +// The following example uploads an object to a versioning-enabled bucket. The source +// file is specified using Windows file syntax. S3 returns VersionId of the newly created +// object. 
func ExampleClient_PutObjectRequest_shared03() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2256,13 +2258,9 @@ func ExampleClient_PutObjectRequest_shared03() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - Metadata: map[string]string{ - "metadata1": "value1", - "metadata2": "value2", - }, + Key: aws.String("HappyFace.jpg"), } req := svc.PutObjectRequest(input) @@ -2284,10 +2282,11 @@ func ExampleClient_PutObjectRequest_shared03() { fmt.Println(result) } -// To upload an object and specify optional tags +// To upload an object and specify canned ACL. // -// The following example uploads an object. The request specifies optional object tags. -// The bucket is versioned, therefore S3 returns version ID of the newly created object. +// The following example uploads and object. The request specifies optional canned ACL +// (access control list) to all READ access to authenticated users. If the bucket is +// versioning enabled, S3 returns version ID in response. func ExampleClient_PutObjectRequest_shared04() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2296,10 +2295,10 @@ func ExampleClient_PutObjectRequest_shared04() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - Tagging: aws.String("key1=value1&key2=value2"), + ACL: s3.ObjectCannedACLAuthenticatedRead, + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), } req := svc.PutObjectRequest(input) @@ -2321,11 +2320,10 @@ func ExampleClient_PutObjectRequest_shared04() { fmt.Println(result) } -// To upload an object and specify server-side encryption and object tags +// To upload an object (specify optional headers) // -// The following example uploads and object. The request specifies the optional server-side -// encryption option. The request also specifies optional object tags. If the bucket -// is versioning enabled, S3 returns version ID in response. +// The following example uploads an object. The request specifies optional request headers +// to directs S3 to use specific storage class and use server-side encryption. func ExampleClient_PutObjectRequest_shared05() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2334,11 +2332,11 @@ func ExampleClient_PutObjectRequest_shared05() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), + Key: aws.String("HappyFace.jpg"), ServerSideEncryption: s3.ServerSideEncryptionAes256, - Tagging: aws.String("key1=value1&key2=value2"), + StorageClass: s3.StorageClassStandardIa, } req := svc.PutObjectRequest(input) @@ -2360,10 +2358,10 @@ func ExampleClient_PutObjectRequest_shared05() { fmt.Println(result) } -// To upload an object (specify optional headers) +// To upload object and specify user-defined metadata // -// The following example uploads an object. The request specifies optional request headers -// to directs S3 to use specific storage class and use server-side encryption. +// The following example creates an object. 
The request also specifies optional metadata. +// If the bucket is versioning enabled, S3 returns version ID in response. func ExampleClient_PutObjectRequest_shared06() { cfg, err := external.LoadDefaultAWSConfig() if err != nil { @@ -2372,11 +2370,13 @@ func ExampleClient_PutObjectRequest_shared06() { svc := s3.New(cfg) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - ServerSideEncryption: s3.ServerSideEncryptionAes256, - StorageClass: s3.StorageClassStandardIa, + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + Metadata: map[string]string{ + "metadata1": "value1", + "metadata2": "value2", + }, } req := svc.PutObjectRequest(input) diff --git a/service/s3/api_op_AbortMultipartUpload.go b/service/s3/api_op_AbortMultipartUpload.go index 6405539dc74..844aed2ee99 100644 --- a/service/s3/api_op_AbortMultipartUpload.go +++ b/service/s3/api_op_AbortMultipartUpload.go @@ -13,7 +13,7 @@ import ( type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` - // Name of the bucket to which the multipart upload was initiated. + // The bucket to which the upload was taking place. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -131,12 +131,32 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Aborts a multipart upload. +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the +// the part storage, you should call the ListParts operation and ensure the // parts list is empty. // +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to AbortMultipartUpload +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // // Example sending a request using AbortMultipartUploadRequest. // req := client.AbortMultipartUploadRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index c89ea9a5f81..e99e814fb06 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -13,12 +13,17 @@ import ( type CompleteMultipartUploadInput struct { _ struct{} `type:"structure" payload:"MultipartUpload"` + // Name of the bucket to which the multipart upload was initiated. 
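The expanded AbortMultipartUpload documentation above describes freeing part storage and then verifying with ListParts. As an illustrative sketch only (not part of this changeset; the bucket, key, and upload ID are placeholders and error handling is reduced to panics), the same flow looks like this with the request/Send pattern used throughout these generated examples:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Abort the in-progress multipart upload; the upload ID is a placeholder.
	abortReq := svc.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String("examplebucket"),
		Key:      aws.String("bigobject"),
		UploadId: aws.String("exampleUploadId"),
	})
	if _, err := abortReq.Send(context.TODO()); err != nil {
		panic("abort failed, " + err.Error())
	}

	// Per the documentation, call ListParts afterwards to confirm that the
	// parts list is empty and no part storage is still being billed.
	listReq := svc.ListPartsRequest(&s3.ListPartsInput{
		Bucket:   aws.String("examplebucket"),
		Key:      aws.String("bigobject"),
		UploadId: aws.String("exampleUploadId"),
	})
	listResp, err := listReq.Send(context.TODO())
	if err != nil {
		// A NoSuchUpload error here also indicates the upload is gone.
		fmt.Println("ListParts error:", err)
		return
	}
	fmt.Println("remaining parts:", len(listResp.Parts))
}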
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The container for the multipart upload request information. MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the @@ -27,6 +32,8 @@ type CompleteMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer RequestPayer `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"true"` + // ID for the initiated multipart upload. + // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -107,32 +114,43 @@ func (s CompleteMultipartUploadInput) MarshalFields(e protocol.FieldEncoder) err type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the newly created object. Bucket *string `type:"string"` - // Entity tag of the object. + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + // The object key of the newly created object. Key *string `min:"1" type:"string"` + // The URI that identifies the newly created object. Location *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged RequestCharged `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` - // Version of the object. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. 
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -214,6 +232,64 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // // Completes a multipart upload by assembling previously uploaded parts. // +// You first initiate the multipart upload and then upload all parts using the +// UploadPart operation. After successfully uploading all relevant parts of +// an upload, you call this operation to complete the upload. Upon receiving +// this request, Amazon S3 concatenates all the parts in ascending order by +// part number to create a new object. In the Complete Multipart Upload request, +// you must provide the parts list. You must ensure the parts list is complete; +// this operation concatenates the parts you provide in the list. For each part +// in the list, you must provide the part number and the ETag value, returned +// after that part was uploaded. +// +// Processing of a Complete Multipart Upload request could take several minutes +// to complete. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends whitespace characters to keep the +// connection from timing out. Because a request could fail after the initial +// 200 OK response has been sent, it is important that you check the response +// body to determine whether the request succeeded. +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// CompleteMultipartUpload has the following special errors: +// +// * Error code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. 400 Bad Request +// +// * Error code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// entity tag might not have matched the part's entity tag. 400 Bad Request +// +// * Error code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 400 Bad Request +// +// * Error code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. 404 Not Found +// +// The following operations are related to CompleteMultipartUpload: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // // Example sending a request using CompleteMultipartUploadRequest.
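The CompleteMultipartUpload documentation above outlines the initiate, upload-parts, and complete sequence. The following sketch is illustrative only: it assumes a placeholder bucket and key and a single small part, whereas a real upload would use parts of at least 5 MB (except the last) and more careful error handling.

package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	bucket, key := aws.String("examplebucket"), aws.String("largeobject")

	// 1. Initiate the upload and keep the returned upload ID.
	createResp, err := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	}).Send(context.TODO())
	if err != nil {
		panic("initiate failed, " + err.Error())
	}

	// 2. Upload a part, quoting the upload ID from step 1.
	partResp, err := svc.UploadPartRequest(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   createResp.UploadId,
		PartNumber: aws.Int64(1),
		Body:       aws.ReadSeekCloser(strings.NewReader("part one data")),
	}).Send(context.TODO())
	if err != nil {
		panic("upload part failed, " + err.Error())
	}

	// 3. Complete the upload, supplying the part number and ETag of every part
	//    in ascending order, as the documentation above requires.
	completeResp, err := svc.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: createResp.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []s3.CompletedPart{
				{ETag: partResp.ETag, PartNumber: aws.Int64(1)},
			},
		},
	}).Send(context.TODO())
	if err != nil {
		panic("complete failed, " + err.Error())
	}
	fmt.Println(completeResp)
}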
// req := client.CompleteMultipartUploadRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index ea85a19f291..bb27e837bfa 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -17,6 +17,8 @@ type CopyObjectInput struct { // The canned ACL to apply to the object. ACL ObjectCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` + // The name of the destination bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -84,6 +86,8 @@ type CopyObjectInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // The key of the destination object. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -97,10 +101,10 @@ type CopyObjectInput struct { // Specifies whether you want to apply a Legal Hold to the copied object. ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // The object lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the copied object. ObjectLockMode ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // The date and time when you want the copied object's object lock to expire. + // The date and time when you want the copied object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -449,8 +453,10 @@ func (s CopyObjectInput) MarshalFields(e protocol.FieldEncoder) error { type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` + // Version of the copied object in the destination bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. @@ -475,8 +481,8 @@ type CopyObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 @@ -564,6 +570,189 @@ const opCopyObject = "CopyObject" // // Creates a copy of an object that is already stored in Amazon S3. // +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic operation using +// this API. However, for copying an object greater than 5 GB, you must use +// the multipart upload Upload Part - Copy API. 
For conceptual information, +// see Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// +// Amazon S3 Transfer Acceleration does not support cross-region copies. If +// you request a cross-region copy using a Transfer Acceleration endpoint, you +// get a 400 Bad Request error. For more information about transfer acceleration, +// see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// All copy requests must be authenticated. Additionally, you must have read +// access to the source object and write access to the destination bucket. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. +// +// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, +// x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since. +// +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. +// +// You can use this operation to change the storage class of an object that +// is already stored in Amazon S3 using the StorageClass parameter. For more +// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). +// +// The source object that you are copying can be encrypted or unencrypted. If +// the source object is encrypted, it can be encrypted by server-side encryption +// using AWS-managed encryption keys or by using a customer-provided encryption +// key. When copying an object, you can request that Amazon S3 encrypt the target +// object by using either the AWS-managed encryption keys or by using your own +// encryption key. You can do this regardless of the form of server-side encryption +// that was used to encrypt the source, or even if the source object was not +// encrypted. For more information about server-side encryption, see Using Server-Side +// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// A copy request might return an error when Amazon S3 receives the copy request +// or while Amazon S3 is copying the files. If the error occurs before the copy +// operation starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK response. +// This means that a 200 OK response can contain either a success or an error. +// Design your application to parse the contents of the response and handle +// it appropriately. +// +// If the copy is successful, you receive a response with information about +// the copied object. +// +// If the request is an HTTP 1.1 request, the response is chunk encoded. 
If +// it were not, it would not contain the content-length, and you would need +// to read the entire body. +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 +// returns 200 OK and copies the data: x-amz-copy-source-if-match condition +// evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates +// to false +// +// * Consideration 2 – If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// and evaluate as follows, Amazon S3 returns the 412 Precondition Failed +// response code: x-amz-copy-source-if-none-match condition evaluates to +// false x-amz-copy-source-if-modified-since condition evaluates to true +// +// The copy request charge is based on the storage class and Region you specify +// for the destination object. For pricing information, see Amazon S3 Pricing +// (https://aws.amazon.com/s3/pricing/). +// +// Following are other considerations when using CopyObject: +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. (If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted.) To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, then you must restore a +// copy of this object before you can use it as a source object for the copy +// operation. For more information, see . +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// To encrypt the target object, you must provide the appropriate encryption-related +// request headers. The one you use depends on whether you want to use AWS-managed +// encryption keys or provide your own encryption key. +// +// * To encrypt the target object using server-side encryption with an AWS-managed +// encryption key, provide the following request headers, as appropriate. 
+// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed customer master key (CMK) in KMS to protect the +// data. All GET and PUT requests for an object protected by AWS KMS fail +// if you don't make them with SSL or by using SigV4. For more information +// on Server-Side Encryption with CMKs stored in Amazon KMS (SSE-KMS), see +// Protecting Data Using Server-Side Encryption with CMKs stored in KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * To encrypt the target object using server-side encryption with an encryption +// key that you provide, use the following headers. x-amz-server-side​-encryption​-customer-algorithm +// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 +// +// * If the source object is encrypted using server-side encryption with +// customer-provided encryption keys, you must use the following headers. +// x-amz-copy-source​-server-side​-encryption​-customer-algorithm x-amz-copy-source​-server-side​-encryption​-customer-key +// x-amz-copy-source-​server-side​-encryption​-customer-key-MD5 For +// more information on Server-Side Encryption with CMKs stored in Amazon +// KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs +// stored in Amazon KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the Access Control List (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. 
+// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants the AWS +// accounts identified by email addresses permissions to read object data +// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// The following operation are related to CopyObject +// +// * PutObject +// +// * GetObject +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// // // Example sending a request using CopyObjectRequest. // req := client.CopyObjectRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index ebc39913122..c8562e81fe8 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -16,9 +16,12 @@ type CreateBucketInput struct { // The canned ACL to apply to the bucket. ACL BucketCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` + // The name of the bucket to create. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The configuration information for the bucket. CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -37,8 +40,7 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Specifies whether you want Amazon S3 object lock to be enabled for the new - // bucket. + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } @@ -131,6 +133,9 @@ func (s CreateBucketInput) MarshalFields(e protocol.FieldEncoder) error { type CreateBucketOutput struct { _ struct{} `type:"structure"` + // Specifies the region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) region (us-east-1), you do not need + // to specify the location. Location *string `location:"header" locationName:"Location" type:"string"` } @@ -155,7 +160,60 @@ const opCreateBucket = "CreateBucket" // CreateBucketRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Creates a new bucket. +// Creates a new bucket. To create a bucket, you must register with Amazon S3 +// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests +// are never allowed to create buckets. By creating the bucket, you become the +// bucket owner. +// +// Not every string is an acceptable bucket name. For information on bucket +// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). 
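To tie the CopyObject documentation above together, here is a hedged sketch of a single-operation copy (the source object is assumed to be under 5 GB) that overrides the default private ACL, changes the storage class, and requests SSE-S3 encryption. Bucket and key names are placeholders, not values from this changeset.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Copy an existing object in a single atomic operation. CopySource takes
	// the form "source-bucket/source-key" (URL encoded where necessary).
	req := svc.CopyObjectRequest(&s3.CopyObjectInput{
		Bucket:               aws.String("destinationbucket"),
		Key:                  aws.String("copiedobject"),
		CopySource:           aws.String("examplebucket/HappyFace.jpg"),
		ACL:                  s3.ObjectCannedACLAuthenticatedRead,
		StorageClass:         s3.StorageClassStandardIa,
		ServerSideEncryption: s3.ServerSideEncryptionAes256,
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		// As the documentation notes, a copy error can also arrive inside a
		// 200 OK response body, so real code should inspect the response too.
		panic("copy failed, " + err.Error())
	}
	fmt.Println(resp)
}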
+// +// By default, the bucket is created in the US East (N. Virginia) region. You +// can optionally specify a region in the request body. You might choose a region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the EU (Ireland) region. For more information, see How +// to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request go to the us-east-1 region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as region, even if the location +// constraint in the request specifies another region where the bucket is to +// be created. If you create a bucket in a region other than US East (N. Virginia) +// region, your application must be able to handle 307 redirect. For more information, +// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants the AWS +// accounts identified by email addresses permissions to read object data +// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject +// +// * DeleteBucket // // // Example sending a request using CreateBucketRequest. // req := client.CreateBucketRequest(params) diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 2ff3d29111d..a5b81f0ed90 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -17,6 +17,8 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. 
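A minimal CreateBucket sketch matching the documentation above, assuming a placeholder bucket name and picking the EU (Ireland) Region purely for illustration; the location-constraint enum constant is assumed to follow the SDK's generated naming pattern.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Create a bucket outside us-east-1 by supplying a location constraint in
	// the request body, as described above. Without the CreateBucketConfiguration
	// block, the bucket is created in us-east-1.
	req := svc.CreateBucketRequest(&s3.CreateBucketInput{
		Bucket: aws.String("examplebucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: s3.BucketLocationConstraintEuWest1,
		},
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic("create bucket failed, " + err.Error())
	}
	fmt.Println(resp)
}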
ACL ObjectCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` + // The name of the bucket to which to initiate the upload + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -52,6 +54,8 @@ type CreateMultipartUploadInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the multipart upload is to be initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -61,10 +65,10 @@ type CreateMultipartUploadInput struct { // Specifies whether you want to apply a Legal Hold to the uploaded object. ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // Specifies the object lock mode that you want to apply to the uploaded object. + // Specifies the Object Lock mode that you want to apply to the uploaded object. ObjectLockMode ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // Specifies the date and time when you want the object lock to expire. + // Specifies the date and time when you want the Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -334,11 +338,20 @@ func (s CreateMultipartUploadInput) MarshalFields(e protocol.FieldEncoder) error type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. @@ -366,8 +379,8 @@ type CreateMultipartUploadOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. 
+ // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 @@ -467,13 +480,146 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Initiates a multipart upload and returns an upload ID. +// This operation initiates a multipart upload and returns an upload ID. This +// upload ID is used to associate all of the parts in the specific multipart +// upload. You specify this upload ID in each of your subsequent upload part +// requests (see UploadPart). You also include this upload ID in the final request +// to either complete or abort the multipart upload request. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. +// For more information about multipart uploads, see Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the upload must complete within the number of days specified in the bucket +// lifecycle configuration. Otherwise, the incomplete multipart upload becomes +// eligible for an abort operation and Amazon S3 aborts the multipart upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// For information about the permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (AWS Signature Version +// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stop charging you for storing them only after you either complete +// or abort a multipart upload. +// +// You can optionally request server-side encryption. For server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You can provide your own encryption key, +// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or +// Amazon S3-managed encryption keys. 
If you choose to provide your own encryption +// key, the request headers you provide in UploadPart) and UploadPartCopy) requests +// must match the headers you used in the request to initiate the upload by +// using CreateMultipartUpload. +// +// To perform a multipart upload with encryption using an AWS KMS CMK, the requester +// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, +// and kms:DescribeKey actions on the key. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before +// it completes the multipart upload. +// +// If your AWS Identity and Access Management (IAM) user or role is in the same +// AWS account as the AWS KMS CMK, then you must have these permissions on the +// key policy. If your IAM user or role belongs to a different account than +// the key, then you must have the permissions on both the key policy and your +// IAM user or role. +// +// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Permissions +// +// When copying an object, you can optionally specify the accounts or groups +// that should be granted specific permissions on the new object. There are +// two ways to grant the permissions using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS-managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in Amazon Key Management Service (KMS) – If you want AWS to manage +// the keys used to encrypt data, specify the following headers in the request. +// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. For more information on Server-Side Encryption +// with CMKs Stored in Amazon KMS (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). 
+// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key +// x-amz-server-side​-encryption​-customer-key-MD5 For more information +// on Server-Side Encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the Access Control List (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants the AWS +// accounts identified by email addresses permissions to read object data +// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// The following operations are related to CreateMultipartUpload: +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // // Example sending a request using CreateMultipartUploadRequest. // req := client.CreateMultipartUploadRequest(params) diff --git a/service/s3/api_op_DeleteBucket.go b/service/s3/api_op_DeleteBucket.go index ceaeb41df3c..a01f2113812 100644 --- a/service/s3/api_op_DeleteBucket.go +++ b/service/s3/api_op_DeleteBucket.go @@ -14,6 +14,8 @@ import ( type DeleteBucketInput struct { _ struct{} `type:"structure"` + // Specifies the bucket being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -78,6 +80,12 @@ const opDeleteBucket = "DeleteBucket" // Deletes the bucket. 
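Building on the CreateMultipartUpload documentation above, the following illustrative initiation request asks for SSE-S3 encryption and a canned ACL. The bucket and key are placeholders, and the response fields mentioned in the comments are those documented for CreateMultipartUploadOutput in this change.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Initiate an upload that asks S3 to encrypt the object with an Amazon
	// S3-managed key (SSE-S3) and grants READ to authenticated users. The
	// response carries the UploadId needed by every later UploadPart call,
	// plus AbortDate/AbortRuleId when a matching lifecycle abort rule exists.
	req := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("examplebucket"),
		Key:                  aws.String("largeobject"),
		ACL:                  s3.ObjectCannedACLAuthenticatedRead,
		ServerSideEncryption: s3.ServerSideEncryptionAes256,
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic("initiate failed, " + err.Error())
	}
	fmt.Println(resp)
}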
All objects (including all object versions and Delete // Markers) in the bucket must be deleted before the bucket itself can be deleted. // +// Related Resources +// +// * +// +// * +// // // Example sending a request using DeleteBucketRequest. // req := client.DeleteBucketRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 33fd6b26e79..27920785c99 100644 --- a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -97,7 +97,20 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// +// * +// +// * // // // Example sending a request using DeleteBucketAnalyticsConfigurationRequest. // req := client.DeleteBucketAnalyticsConfigurationRequest(params) diff --git a/service/s3/api_op_DeleteBucketCors.go b/service/s3/api_op_DeleteBucketCors.go index 674a2b4126f..4ac00b438ff 100644 --- a/service/s3/api_op_DeleteBucketCors.go +++ b/service/s3/api_op_DeleteBucketCors.go @@ -14,6 +14,8 @@ import ( type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` + // Specifies the bucket whose cors configuration is being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -75,7 +77,21 @@ const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes the CORS configuration information set for the bucket. +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information more about cors, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +// +// Related Resources: +// +// * +// +// * RESTOPTIONSobject // // // Example sending a request using DeleteBucketCorsRequest. 
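As a hedged sketch of the bucket-deletion operations documented above (placeholder bucket name, panics in place of real error handling), the cors configuration is removed first and the bucket itself is deleted only once it is empty:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Remove the bucket's cors configuration (requires s3:PutBucketCORS).
	if _, err := svc.DeleteBucketCorsRequest(&s3.DeleteBucketCorsInput{
		Bucket: aws.String("examplebucket"),
	}).Send(context.TODO()); err != nil {
		panic("delete cors failed, " + err.Error())
	}

	// A bucket can only be deleted once every object (and object version) in
	// it has been removed, per the DeleteBucket documentation above.
	if _, err := svc.DeleteBucketRequest(&s3.DeleteBucketInput{
		Bucket: aws.String("examplebucket"),
	}).Send(context.TODO()); err != nil {
		panic("delete bucket failed, " + err.Error())
	}
	fmt.Println("cors configuration and bucket deleted")
}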
// req := client.DeleteBucketCorsRequest(params) diff --git a/service/s3/api_op_DeleteBucketEncryption.go b/service/s3/api_op_DeleteBucketEncryption.go index 388e35dfd22..ade96934fd5 100644 --- a/service/s3/api_op_DeleteBucketEncryption.go +++ b/service/s3/api_op_DeleteBucketEncryption.go @@ -78,7 +78,23 @@ const opDeleteBucketEncryption = "DeleteBucketEncryption" // DeleteBucketEncryptionRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes the server-side encryption configuration from the bucket. +// This implementation of the DELETE operation removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev//bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketEncryption +// +// * GetBucketEncryption // // // Example sending a request using DeleteBucketEncryptionRequest. // req := client.DeleteBucketEncryptionRequest(params) diff --git a/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/service/s3/api_op_DeleteBucketInventoryConfiguration.go index c0f07ed45fd..5bbc694a140 100644 --- a/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -95,6 +95,23 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. // +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operation related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// // // Example sending a request using DeleteBucketInventoryConfigurationRequest. 
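A short illustrative sketch for the two configuration-deletion operations documented above; the bucket name and the inventory configuration ID ("report1") are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Remove the bucket's default encryption configuration
	// (requires s3:PutEncryptionConfiguration).
	if _, err := svc.DeleteBucketEncryptionRequest(&s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("examplebucket"),
	}).Send(context.TODO()); err != nil {
		panic("delete encryption failed, " + err.Error())
	}

	// Delete one inventory configuration by its ID
	// (requires s3:PutInventoryConfiguration).
	if _, err := svc.DeleteBucketInventoryConfigurationRequest(&s3.DeleteBucketInventoryConfigurationInput{
		Bucket: aws.String("examplebucket"),
		Id:     aws.String("report1"),
	}).Send(context.TODO()); err != nil {
		panic("delete inventory configuration failed, " + err.Error())
	}
	fmt.Println("default encryption and inventory configuration removed")
}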
// req := client.DeleteBucketInventoryConfigurationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_DeleteBucketLifecycle.go b/service/s3/api_op_DeleteBucketLifecycle.go index 4c65dba5ec7..316f6aae7d6 100644 --- a/service/s3/api_op_DeleteBucketLifecycle.go +++ b/service/s3/api_op_DeleteBucketLifecycle.go @@ -14,6 +14,8 @@ import ( type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` + // The bucket name of the lifecycle to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -75,7 +77,27 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes the lifecycle configuration from the bucket. +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketLifecycleConfiguration // // // Example sending a request using DeleteBucketLifecycleRequest. // req := client.DeleteBucketLifecycleRequest(params) diff --git a/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/service/s3/api_op_DeleteBucketMetricsConfiguration.go index 1e74312a83e..bb7d8317629 100644 --- a/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -92,8 +92,28 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // DeleteBucketMetricsConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
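The DeleteBucketLifecycle documentation above translates into a very small request; this sketch assumes a placeholder bucket name.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := s3.New(cfg)

	// Delete every lifecycle rule on the bucket. As the documentation notes,
	// the deletion can take a short while to propagate, after which S3 no
	// longer expires or transitions objects on the basis of those rules.
	req := svc.DeleteBucketLifecycleRequest(&s3.DeleteBucketLifecycleInput{
		Bucket: aws.String("examplebucket"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		panic("delete lifecycle failed, " + err.Error())
	}
	fmt.Println("lifecycle configuration removed")
}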
+// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // // Example sending a request using DeleteBucketMetricsConfigurationRequest. // req := client.DeleteBucketMetricsConfigurationRequest(params) diff --git a/service/s3/api_op_DeleteBucketPolicy.go b/service/s3/api_op_DeleteBucketPolicy.go index 645258697df..9b7097f4707 100644 --- a/service/s3/api_op_DeleteBucketPolicy.go +++ b/service/s3/api_op_DeleteBucketPolicy.go @@ -14,6 +14,8 @@ import ( type DeleteBucketPolicyInput struct { _ struct{} `type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -75,7 +77,29 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes the policy from the bucket. +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the DeleteBucketPolicy permissions on the specified bucket and +// belong to the bucket owner's account in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not using +// an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to DeleteBucketPolicy +// +// * CreateBucket +// +// * DeleteObject // // // Example sending a request using DeleteBucketPolicyRequest. // req := client.DeleteBucketPolicyRequest(params) diff --git a/service/s3/api_op_DeleteBucketReplication.go b/service/s3/api_op_DeleteBucketReplication.go index f1ccc8a84de..c3f4a68de0f 100644 --- a/service/s3/api_op_DeleteBucketReplication.go +++ b/service/s3/api_op_DeleteBucketReplication.go @@ -16,9 +16,6 @@ type DeleteBucketReplicationInput struct { // The bucket name. // - // It can take a while to propagate the deletion of a replication configuration - // to all Amazon S3 systems. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -80,10 +77,26 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // DeleteBucketReplicationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket.
For information about -// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// The following operations are related to DeleteBucketReplication +// +// * PutBucketReplication +// +// * GetBucketReplication +// // // Example sending a request using DeleteBucketReplicationRequest. // req := client.DeleteBucketReplicationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_DeleteBucketTagging.go b/service/s3/api_op_DeleteBucketTagging.go index 7427386f47c..6c3452ea470 100644 --- a/service/s3/api_op_DeleteBucketTagging.go +++ b/service/s3/api_op_DeleteBucketTagging.go @@ -14,6 +14,8 @@ import ( type DeleteBucketTaggingInput struct { _ struct{} `type:"structure"` + // The bucket that has the tag set to be removed. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -77,6 +79,16 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // // Deletes the tags from the bucket. // +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging +// +// * GetBucketTagging +// +// * PutBucketTagging +// // // Example sending a request using DeleteBucketTaggingRequest. // req := client.DeleteBucketTaggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_DeleteBucketWebsite.go b/service/s3/api_op_DeleteBucketWebsite.go index 843804c60d7..2975720e85e 100644 --- a/service/s3/api_op_DeleteBucketWebsite.go +++ b/service/s3/api_op_DeleteBucketWebsite.go @@ -14,6 +14,8 @@ import ( type DeleteBucketWebsiteInput struct { _ struct{} `type:"structure"` + // The bucket name for which you want to remove the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -75,7 +77,26 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// This operation removes the website configuration from the bucket. +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. 
Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// +// * PutBucketWebsite // // // Example sending a request using DeleteBucketWebsiteRequest. // req := client.DeleteBucketWebsiteRequest(params) diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index 870fd9e80d6..4e9cf6d285a 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -13,18 +13,24 @@ import ( type DeleteObjectInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key name of the object to delete. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // Delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that she or he will be charged for the @@ -165,6 +171,29 @@ const opDeleteObject = "DeleteObject" // marker, which becomes the latest version of the object. If there isn't a // null version, Amazon S3 does not remove any objects. // +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. If the object deleted is a delete marker, Amazon S3 sets the response +// header, x-amz-delete-marker, to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. +// +// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). +// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// You can delete objects by explicitly calling the DELETE Object API or configure +// its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for +// you.
If you want to block users or accounts from removing or deleting objects +// from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, +// and s3:PutLifeCycleConfiguration actions. +// +// The following operation is related to DeleteObject: +// +// * PutObject +// // // Example sending a request using DeleteObjectRequest. // req := client.DeleteObjectRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_DeleteObjectTagging.go b/service/s3/api_op_DeleteObjectTagging.go index a4e9f707587..318b4f24732 100644 --- a/service/s3/api_op_DeleteObjectTagging.go +++ b/service/s3/api_op_DeleteObjectTagging.go @@ -13,9 +13,13 @@ import ( type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` + // The bucket containing the objects from which to remove the tags. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key name identifying the object from which to remove all tags. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -108,7 +112,21 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Removes the tag-set from an existing object. +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// To use this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging: +// +// * PutObjectTagging +// +// * GetObjectTagging // // // Example sending a request using DeleteObjectTaggingRequest. // req := client.DeleteObjectTaggingRequest(params) diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index 60ccfe59bd6..add16c23329 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -13,19 +13,25 @@ import ( type DeleteObjectsInput struct { _ struct{} `type:"structure" payload:"Delete"` + // The bucket name containing the objects to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // object lock in place. You must have sufficient permissions to perform this + // Object Lock in place. You must have sufficient permissions to perform this // operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Container for the request. + // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // Delete enabled.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` // Confirms that the requester knows that she or he will be charged for the @@ -109,8 +115,12 @@ func (s DeleteObjectsInput) MarshalFields(e protocol.FieldEncoder) error { type DeleteObjectsOutput struct { _ struct{} `type:"structure"` + // Container element for a successful delete. It identifies the object that + // was successfully deleted. Deleted []DeletedObject `type:"list" flattened:"true"` + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered. Errors []Error `locationName:"Error" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the @@ -164,7 +174,47 @@ const opDeleteObjects = "DeleteObjects" // Amazon Simple Storage Service. // // This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request contains a list of up to 1000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if +// you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success, or failure, in the response. Note that, if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion, the operation does not return any information about +// the delete in the response body. +// +// When performing this operation on an MFA Delete enabled bucket that attempts +// to delete any versioned objects, you must include an MFA token. If you do +// not provide one, the entire request will fail, even if there are non-versioned +// objects you are attempting to delete. If you provide an invalid token, whether +// there are versioned keys in the request or not, the entire Multi-Object Delete +// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). +// +// Finally, the Content-MD5 header is required for all Multi-Object Delete requests. +// Amazon S3 uses the header value to ensure that your request body has not +// been altered in transit. +// +// The following operations are related to DeleteObjects: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // // Example sending a request using DeleteObjectsRequest.
// req := client.DeleteObjectsRequest(params) diff --git a/service/s3/api_op_DeletePublicAccessBlock.go b/service/s3/api_op_DeletePublicAccessBlock.go index 59046f1edfe..ee321ea67d0 100644 --- a/service/s3/api_op_DeletePublicAccessBlock.go +++ b/service/s3/api_op_DeletePublicAccessBlock.go @@ -77,7 +77,21 @@ const opDeletePublicAccessBlock = "DeletePublicAccessBlock" // DeletePublicAccessBlockRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. In order +// to use this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to DeletePublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * GetBucketPolicyStatus // // // Example sending a request using DeletePublicAccessBlockRequest. // req := client.DeletePublicAccessBlockRequest(params) diff --git a/service/s3/api_op_GetBucketAccelerateConfiguration.go b/service/s3/api_op_GetBucketAccelerateConfiguration.go index c1e566ca3ac..785fa8952e0 100644 --- a/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -85,7 +85,32 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the accelerate configuration of a bucket. +// This implementation of the GET operation uses the accelerate subresource +// to return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket.
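A short sketch of reading the Transfer Acceleration state with GetBucketAccelerateConfiguration; the bucket name is a placeholder and external.LoadDefaultAWSConfig is assumed for credentials and region:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // Read the Transfer Acceleration state; Status is empty if it was never set.
    req := svc.GetBucketAccelerateConfigurationRequest(&s3.GetBucketAccelerateConfigurationInput{
        Bucket: aws.String("example-bucket"), // placeholder
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("accelerate status:", resp.Status)
}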
+// +// For more information on transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev//transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration // // // Example sending a request using GetBucketAccelerateConfigurationRequest. // req := client.GetBucketAccelerateConfigurationRequest(params) diff --git a/service/s3/api_op_GetBucketAcl.go b/service/s3/api_op_GetBucketAcl.go index 224b69de36f..b43c6999826 100644 --- a/service/s3/api_op_GetBucketAcl.go +++ b/service/s3/api_op_GetBucketAcl.go @@ -13,6 +13,8 @@ import ( type GetBucketAclInput struct { _ struct{} `type:"structure"` + // Specifies the S3 bucket whose ACL is being requested. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -61,6 +63,7 @@ type GetBucketAclOutput struct { // A list of grants. Grants []Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -97,7 +100,15 @@ const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Gets the access control policy for the bucket. +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * // // // Example sending a request using GetBucketAclRequest. // req := client.GetBucketAclRequest(params) diff --git a/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/service/s3/api_op_GetBucketAnalyticsConfiguration.go index bb5a58c5d00..e2096d48926 100644 --- a/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -100,8 +100,27 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. 
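A possible usage sketch for GetBucketAnalyticsConfiguration; the bucket name and analytics configuration ID are placeholders, and the default external config loader is assumed:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // Fetch the analytics configuration identified by Id; both values are placeholders.
    req := svc.GetBucketAnalyticsConfigurationRequest(&s3.GetBucketAnalyticsConfigurationInput{
        Bucket: aws.String("example-bucket"),
        Id:     aws.String("example-analytics-id"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("analytics configuration: %+v\n", resp.AnalyticsConfiguration)
}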
+// +// Related Resources +// +// * +// +// * +// +// * // // // Example sending a request using GetBucketAnalyticsConfigurationRequest. // req := client.GetBucketAnalyticsConfigurationRequest(params) diff --git a/service/s3/api_op_GetBucketCors.go b/service/s3/api_op_GetBucketCors.go index d990590d8dd..3e9079a19bd 100644 --- a/service/s3/api_op_GetBucketCors.go +++ b/service/s3/api_op_GetBucketCors.go @@ -13,6 +13,8 @@ import ( type GetBucketCorsInput struct { _ struct{} `type:"structure"` + // The bucket name for which to get the cors configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,6 +60,8 @@ func (s GetBucketCorsInput) MarshalFields(e protocol.FieldEncoder) error { type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. CORSRules []CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } @@ -88,7 +92,20 @@ const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the CORS configuration for the bucket. +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// To learn more about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors +// +// * DeleteBucketCors // // // Example sending a request using GetBucketCorsRequest. // req := client.GetBucketCorsRequest(params) diff --git a/service/s3/api_op_GetBucketEncryption.go b/service/s3/api_op_GetBucketEncryption.go index bb2bf761279..5679d69c8fb 100644 --- a/service/s3/api_op_GetBucketEncryption.go +++ b/service/s3/api_op_GetBucketEncryption.go @@ -86,7 +86,21 @@ const opGetBucketEncryption = "GetBucketEncryption" // GetBucketEncryptionRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the server-side encryption configuration of a bucket. +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption +// +// * DeleteBucketEncryption // // // Example sending a request using GetBucketEncryptionRequest.
// req := client.GetBucketEncryptionRequest(params) diff --git a/service/s3/api_op_GetBucketInventoryConfiguration.go b/service/s3/api_op_GetBucketInventoryConfiguration.go index 7201e65b6d1..6fa9794a58b 100644 --- a/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -100,8 +100,25 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// +// * PutBucketInventoryConfiguration // // // Example sending a request using GetBucketInventoryConfigurationRequest. // req := client.GetBucketInventoryConfigurationRequest(params) diff --git a/service/s3/api_op_GetBucketLifecycle.go b/service/s3/api_op_GetBucketLifecycle.go index 20bb58f6723..031a5b746e4 100644 --- a/service/s3/api_op_GetBucketLifecycle.go +++ b/service/s3/api_op_GetBucketLifecycle.go @@ -13,6 +13,8 @@ import ( type GetBucketLifecycleInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,6 +60,7 @@ func (s GetBucketLifecycleInput) MarshalFields(e protocol.FieldEncoder) error { type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []Rule `locationName:"Rule" type:"list" flattened:"true"` } @@ -88,7 +91,33 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// No longer used, see the GetBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration. +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward compatibility. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default.
The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // // Example sending a request using GetBucketLifecycleRequest. // req := client.GetBucketLifecycleRequest(params) diff --git a/service/s3/api_op_GetBucketLifecycleConfiguration.go b/service/s3/api_op_GetBucketLifecycleConfiguration.go index 4b081e7fb0b..761f44a41bd 100644 --- a/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -13,6 +13,8 @@ import ( type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,6 +60,7 @@ func (s GetBucketLifecycleConfigurationInput) MarshalFields(e protocol.FieldEnco type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } @@ -88,7 +91,37 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the lifecycle configuration information set on the bucket. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are still using a previous version +// of the lifecycle configuration, it still works. For the earlier API description, +// see GetBucketLifecycle. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
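One way this might look in application code, assuming a configured client via external.LoadDefaultAWSConfig and a placeholder bucket name:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // List the lifecycle rules currently set on the bucket (placeholder name).
    req := svc.GetBucketLifecycleConfigurationRequest(&s3.GetBucketLifecycleConfigurationInput{
        Bucket: aws.String("example-bucket"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        // A bucket without a configuration returns NoSuchLifecycleConfiguration.
        log.Fatal(err)
    }
    fmt.Println("lifecycle rules:", len(resp.Rules))
}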
+// +// GetBucketLifecycleConfiguration has the following special error: +// +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// * GetBucketLifecycle +// +// * PutBucketLifecycle +// +// * DeleteBucketLifecycle // // // Example sending a request using GetBucketLifecycleConfigurationRequest. // req := client.GetBucketLifecycleConfigurationRequest(params) diff --git a/service/s3/api_op_GetBucketLocation.go b/service/s3/api_op_GetBucketLocation.go index 4b3cc0dd230..82bca7b5fce 100644 --- a/service/s3/api_op_GetBucketLocation.go +++ b/service/s3/api_op_GetBucketLocation.go @@ -13,6 +13,8 @@ import ( type GetBucketLocationInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the location. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,6 +60,8 @@ func (s GetBucketLocationInput) MarshalFields(e protocol.FieldEncoder) error { type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + // Specifies the region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). LocationConstraint BucketLocationConstraint `type:"string" enum:"true"` } @@ -82,7 +86,17 @@ const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the region the bucket resides in. +// Returns the region the bucket resides in. You set the bucket's region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket. +// +// To use this implementation of the operation, you must be the bucket owner. +// +// The following operations are related to GetBucketLocation: +// +// * GetObject +// +// * CreateBucket // // // Example sending a request using GetBucketLocationRequest. // req := client.GetBucketLocationRequest(params) diff --git a/service/s3/api_op_GetBucketLogging.go b/service/s3/api_op_GetBucketLogging.go index 99999c0bf2b..7f8ab969370 100644 --- a/service/s3/api_op_GetBucketLogging.go +++ b/service/s3/api_op_GetBucketLogging.go @@ -13,6 +13,8 @@ import ( type GetBucketLoggingInput struct { _ struct{} `type:"structure"` + // The bucket name for which to get the logging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -89,6 +91,12 @@ const opGetBucketLogging = "GetBucketLogging" // Returns the logging status of a bucket and the permissions users have to // view and modify that status. To use GET, you must be the bucket owner. // +// The following operations are related to GetBucketLogging: +// +// * CreateBucket +// +// * PutBucketLogging +// // // Example sending a request using GetBucketLoggingRequest.
// req := client.GetBucketLoggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_GetBucketMetricsConfiguration.go b/service/s3/api_op_GetBucketMetricsConfiguration.go index a31844b1e24..f8a17e018de 100644 --- a/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -101,7 +101,26 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // Amazon Simple Storage Service. // // Gets a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // // Example sending a request using GetBucketMetricsConfigurationRequest. // req := client.GetBucketMetricsConfigurationRequest(params) diff --git a/service/s3/api_op_GetBucketNotification.go b/service/s3/api_op_GetBucketNotification.go index a04bd3094e5..92836b3e9df 100644 --- a/service/s3/api_op_GetBucketNotification.go +++ b/service/s3/api_op_GetBucketNotification.go @@ -13,7 +13,7 @@ import ( type GetBucketNotificationInput struct { _ struct{} `type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -60,10 +60,17 @@ func (s GetBucketNotificationInput) MarshalFields(e protocol.FieldEncoder) error type GetBucketNotificationOutput struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events.
TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -100,7 +107,7 @@ const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// No longer used, see the GetBucketNotificationConfiguration operation. +// No longer used, see GetBucketNotificationConfiguration. // // // Example sending a request using GetBucketNotificationRequest. // req := client.GetBucketNotificationRequest(params) diff --git a/service/s3/api_op_GetBucketNotificationConfiguration.go b/service/s3/api_op_GetBucketNotificationConfiguration.go index 7796752bea2..a1b2e17ad07 100644 --- a/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -13,7 +13,7 @@ import ( type GetBucketNotificationConfigurationInput struct { _ struct{} `type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -128,6 +128,22 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // // Returns the notification configuration of a bucket. // +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification +// // // Example sending a request using GetBucketNotificationConfigurationRequest. // req := client.GetBucketNotificationConfigurationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_GetBucketPolicy.go b/service/s3/api_op_GetBucketPolicy.go index a750709f906..e8a496754a0 100644 --- a/service/s3/api_op_GetBucketPolicy.go +++ b/service/s3/api_op_GetBucketPolicy.go @@ -13,6 +13,8 @@ import ( type GetBucketPolicyInput struct { _ struct{} `type:"structure"` + // The bucket name for which to get the bucket policy. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -83,7 +85,26 @@ const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. 
If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject // // // Example sending a request using GetBucketPolicyRequest. // req := client.GetBucketPolicyRequest(params) diff --git a/service/s3/api_op_GetBucketPolicyStatus.go b/service/s3/api_op_GetBucketPolicyStatus.go index 16e814b5fb9..3524f65e657 100644 --- a/service/s3/api_op_GetBucketPolicyStatus.go +++ b/service/s3/api_op_GetBucketPolicyStatus.go @@ -86,7 +86,22 @@ const opGetBucketPolicyStatus = "GetBucketPolicyStatus" // Amazon Simple Storage Service. // // Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * DeletePublicAccessBlock // // // Example sending a request using GetBucketPolicyStatusRequest. // req := client.GetBucketPolicyStatusRequest(params) diff --git a/service/s3/api_op_GetBucketReplication.go b/service/s3/api_op_GetBucketReplication.go index 699ef16e339..b78f7776713 100644 --- a/service/s3/api_op_GetBucketReplication.go +++ b/service/s3/api_op_GetBucketReplication.go @@ -13,6 +13,8 @@ import ( type GetBucketReplicationInput struct { _ struct{} `type:"structure"` + // The bucket name for which to get the replication information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -90,6 +92,28 @@ const opGetBucketReplication = "GetBucketReplication" // to all Amazon S3 systems. Therefore, a get request soon after put or delete // can return a wrong result. // +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html). +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. 
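A sketch of retrieving a bucket's replication configuration via GetBucketReplication, with a placeholder bucket name and the default external config loader assumed:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // Retrieve the replication configuration for a placeholder bucket name.
    req := svc.GetBucketReplicationRequest(&s3.GetBucketReplicationInput{
        Bucket: aws.String("example-bucket"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        // Buckets without a configuration return NoSuchReplicationConfiguration.
        log.Fatal(err)
    }
    if rc := resp.ReplicationConfiguration; rc != nil {
        fmt.Println("replication rules:", len(rc.Rules))
    }
}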
+// +// GetBucketReplication has the following special error: +// +// * Error code: NoSuchReplicationConfiguration Description: There is no +// replication configuration with that name. HTTP Status Code: 404 Not Found +// SOAP Fault Code Prefix: Client +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication +// +// * DeleteBucketReplication +// // // Example sending a request using GetBucketReplicationRequest. // req := client.GetBucketReplicationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_GetBucketRequestPayment.go b/service/s3/api_op_GetBucketRequestPayment.go index 4bfdf809467..b290270a1e9 100644 --- a/service/s3/api_op_GetBucketRequestPayment.go +++ b/service/s3/api_op_GetBucketRequestPayment.go @@ -13,6 +13,8 @@ import ( type GetBucketRequestPaymentInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the payment request configuration + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -83,7 +85,13 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the request payment configuration of a bucket. +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects // // // Example sending a request using GetBucketRequestPaymentRequest. // req := client.GetBucketRequestPaymentRequest(params) diff --git a/service/s3/api_op_GetBucketTagging.go b/service/s3/api_op_GetBucketTagging.go index b7414f512b0..532baba3fb7 100644 --- a/service/s3/api_op_GetBucketTagging.go +++ b/service/s3/api_op_GetBucketTagging.go @@ -13,6 +13,8 @@ import ( type GetBucketTaggingInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the tagging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,6 +60,8 @@ func (s GetBucketTaggingInput) MarshalFields(e protocol.FieldEncoder) error { type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -91,6 +95,21 @@ const opGetBucketTagging = "GetBucketTagging" // // Returns the tag set associated with the bucket. // +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// * PutBucketTagging +// +// * DeleteBucketTagging +// // // Example sending a request using GetBucketTaggingRequest. 
// req := client.GetBucketTaggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_GetBucketVersioning.go b/service/s3/api_op_GetBucketVersioning.go index d68ecdd4828..242e4e89eba 100644 --- a/service/s3/api_op_GetBucketVersioning.go +++ b/service/s3/api_op_GetBucketVersioning.go @@ -13,6 +13,8 @@ import ( type GetBucketVersioningInput struct { _ struct{} `type:"structure"` + // The name of the bucket for which to get the versioning information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -96,6 +98,20 @@ const opGetBucketVersioning = "GetBucketVersioning" // // Returns the versioning state of a bucket. // +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state, i.e., if the MFA Delete status is enabled, the bucket owner must use +// an authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// * GetObject +// +// * PutObject +// +// * DeleteObject +// // // Example sending a request using GetBucketVersioningRequest. // req := client.GetBucketVersioningRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_GetBucketWebsite.go b/service/s3/api_op_GetBucketWebsite.go index 8de1eb87e2e..52ec45902a3 100644 --- a/service/s3/api_op_GetBucketWebsite.go +++ b/service/s3/api_op_GetBucketWebsite.go @@ -13,6 +13,8 @@ import ( type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` + // The bucket name for which to get the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -58,14 +60,17 @@ func (s GetBucketWebsiteInput) MarshalFields(e protocol.FieldEncoder) error { type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` // Specifies the redirect behavior of all requests to a website endpoint of // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -114,7 +119,21 @@ const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the website configuration for a bucket. +// Returns the website configuration for a bucket. To host website on Amazon +// S3, you can configure a bucket as website by adding a website configuration. +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This GET operation requires the S3:GetBucketWebsite permission. By default, +// only the bucket owner can read the bucket website configuration. However, +// bucket owners can allow other users to read the website configuration by +// writing a bucket policy granting them the S3:GetBucketWebsite permission. 
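A brief sketch of fetching a bucket's website configuration with GetBucketWebsite; the bucket name and configuration loading are assumptions:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // Read the website configuration of a placeholder bucket.
    req := svc.GetBucketWebsiteRequest(&s3.GetBucketWebsiteInput{
        Bucket: aws.String("example-bucket"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    if resp.IndexDocument != nil && resp.IndexDocument.Suffix != nil {
        fmt.Println("index document:", *resp.IndexDocument.Suffix)
    }
}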
+// +// The following operations are related to GetBucketWebsite: +// +// * DeleteBucketWebsite +// +// * PutBucketWebsite // // // Example sending a request using GetBucketWebsiteRequest. // req := client.GetBucketWebsiteRequest(params) diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index 03d126b929b..240a8f473e3 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -15,6 +15,8 @@ import ( type GetObjectInput struct { _ struct{} `type:"structure"` + // The bucket name containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -34,6 +36,8 @@ type GetObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key of the object to get. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -255,6 +259,7 @@ func (s GetObjectInput) MarshalFields(e protocol.FieldEncoder) error { type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -316,15 +321,17 @@ type GetObjectOutput struct { // returned if you have permission to view an object's legal hold status. ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // The object lock mode currently in place for this object. + // The Object Lock mode currently in place for this object. ObjectLockMode ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // The date and time when this object's object lock will expire. + // The date and time when this object's Object Lock will expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. ReplicationStatus ReplicationStatus `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"true"` // If present, indicates that the requester was successfully charged for the @@ -345,14 +352,16 @@ type GetObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). ServerSideEncryption ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` + // Provides storage class information of the object.
Amazon S3 returns this + // header for all objects except for Standard storage class objects. StorageClass StorageClass `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"true"` // The number of tags, if any, on the object. @@ -571,7 +580,130 @@ const opGetObject = "GetObject" // GetObjectRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Retrieves objects from Amazon S3. +// Retrieves objects from Amazon S3. To use GET, you must have READ access to +// the object. If you grant READ access to the anonymous user, you can return +// the object without using an authorization header. +// +// An Amazon S3 bucket has no directory hierarchy such as you would find in +// a typical computer file system. You can, however, create a logical hierarchy +// by using object key names that imply a folder structure. For example, instead +// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg. +// +// To get an object from such a logical hierarchy, specify the full key name +// for the object in the GET operation. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the resource +// as /photos/2006/February/sample.jpg. For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, +// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For +// more information about request types, see HTTP Host Header Bucket Specification +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). +// +// To distribute large files to many people, you can save bandwidth costs by +// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// For more information about returning the ACL of an object, see GetObjectAcl. +// +// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE +// storage classes, before you can retrieve the object you must first restore +// a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError +// error. For information about restoring archived objects, see Restoring Archived +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you GET the object, you must use the following headers: +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
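A minimal GetObject download sketch using the request/Send pattern; the bucket is a placeholder and the key reuses the photos/2006/February/sample.jpg example above:

package main

import (
    "context"
    "fmt"
    "io/ioutil"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatal(err)
    }
    svc := s3.New(cfg)

    // Download a single object; bucket and key are placeholders.
    req := svc.GetObjectRequest(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("photos/2006/February/sample.jpg"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("downloaded bytes:", len(body))
}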
+// +// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging +// action), the response also returns the x-amz-tagging-count header that provides +// the count of number of tags associated with the object. You can use GetObjectTagging +// to retrieve the tag set associated with an object. +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return an HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// an HTTP status code 403 ("access denied") error. +// +// Versioning +// +// By default, the GET operation returns the current version of an object. To +// return a different version, use the versionId subresource. +// +// If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. +// +// For more information about versioning, see PutBucketVersioning. +// +// Overriding Response Header Values +// +// There are times when you want to override certain response header values +// in a GET response. For example, you might override the Content-Disposition +// response header value in your GET request. +// +// You can override values for a set of response headers using the following +// query parameters. These response header values are sent only on a successful +// request, that is, when status code 200 OK is returned. The set of headers +// you can override using these parameters is a subset of the headers that Amazon +// S3 accepts when you create an object. The response headers that you can override +// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, +// Content-Disposition, and Content-Encoding. To override these header values +// in the GET response, you use the following request parameters. +// +// You must sign the request, either using an Authorization header or a presigned +// URL, when using these parameters. They cannot be used with an unsigned (anonymous) +// request. +// +// * response-content-type +// +// * response-content-language +// +// * response-expires +// +// * response-cache-control +// +// * response-content-disposition +// +// * response-content-encoding +// +// Additional Considerations about Request Headers +// +// If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. +// +// If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since +// condition evaluates to true; then, S3 returns 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// The following operations are related to GetObject: +// +// * ListBuckets +// +// * GetObjectAcl // // // Example sending a request using GetObjectRequest. 
// req := client.GetObjectRequest(params) diff --git a/service/s3/api_op_GetObjectAcl.go b/service/s3/api_op_GetObjectAcl.go index 074e707d419..3463cda5981 100644 --- a/service/s3/api_op_GetObjectAcl.go +++ b/service/s3/api_op_GetObjectAcl.go @@ -13,9 +13,13 @@ import ( type GetObjectAclInput struct { _ struct{} `type:"structure"` + // The bucket name of the object for which to get the ACL information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key of the object for which to get the ACL information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -98,6 +102,7 @@ type GetObjectAclOutput struct { // A list of grants. Grants []Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` // If present, indicates that the requester was successfully charged for the @@ -144,7 +149,21 @@ const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the access control list (ACL) of an object. +// Returns the access control list (ACL) of an object. To use this operation, +// you must have READ_ACP access to the object. +// +// Versioning +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// The following operations are related to GetObjectAcl: +// +// * GetObject +// +// * DeleteObject +// +// * PutObject // // // Example sending a request using GetObjectAclRequest. // req := client.GetObjectAclRequest(params) diff --git a/service/s3/api_op_GetObjectLegalHold.go b/service/s3/api_op_GetObjectLegalHold.go index 12511a1ca71..b9d61a9148e 100644 --- a/service/s3/api_op_GetObjectLegalHold.go +++ b/service/s3/api_op_GetObjectLegalHold.go @@ -124,7 +124,8 @@ const opGetObjectLegalHold = "GetObjectLegalHold" // GetObjectLegalHoldRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Gets an object's current Legal Hold status. +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // // Example sending a request using GetObjectLegalHoldRequest. // req := client.GetObjectLegalHoldRequest(params) diff --git a/service/s3/api_op_GetObjectLockConfiguration.go b/service/s3/api_op_GetObjectLockConfiguration.go index 22f237c9634..cd79862c5f9 100644 --- a/service/s3/api_op_GetObjectLockConfiguration.go +++ b/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,7 +13,7 @@ import ( type GetObjectLockConfigurationInput struct { _ struct{} `type:"structure"` - // The bucket whose object lock configuration you want to retrieve. + // The bucket whose Object Lock configuration you want to retrieve. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -60,7 +60,7 @@ func (s GetObjectLockConfigurationInput) MarshalFields(e protocol.FieldEncoder) type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - // The specified bucket's object lock configuration. + // The specified bucket's Object Lock configuration. 
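Before the Object Lock hunks continue, a short, hedged sketch of the GetObjectAcl call documented just above (READ_ACP access, optional versionId subresource) may help. The bucket, key, and version ID are placeholders; the field names follow the GetObjectAclInput/Output definitions shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	// Ask for the ACL of one specific version; omit VersionId for the current version.
	req := client.GetObjectAclRequest(&s3.GetObjectAclInput{
		Bucket:    aws.String("examplebucket"),                   // placeholder
		Key:       aws.String("photos/2006/February/sample.jpg"), // placeholder
		VersionId: aws.String("example-version-id"),              // placeholder
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	if resp.Owner != nil && resp.Owner.DisplayName != nil {
		fmt.Println("owner:", *resp.Owner.DisplayName)
	}
	for _, g := range resp.Grants {
		// Permission is an enum string such as FULL_CONTROL or READ.
		fmt.Println("grant:", g.Permission)
	}
}
```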
ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } @@ -85,9 +85,10 @@ const opGetObjectLockConfiguration = "GetObjectLockConfiguration" // GetObjectLockConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Gets the object lock configuration for a bucket. The rule specified in the -// object lock configuration will be applied by default to every new object -// placed in the specified bucket. +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // // Example sending a request using GetObjectLockConfigurationRequest. // req := client.GetObjectLockConfigurationRequest(params) diff --git a/service/s3/api_op_GetObjectRetention.go b/service/s3/api_op_GetObjectRetention.go index 6bb9cafa94a..d809154162f 100644 --- a/service/s3/api_op_GetObjectRetention.go +++ b/service/s3/api_op_GetObjectRetention.go @@ -124,7 +124,8 @@ const opGetObjectRetention = "GetObjectRetention" // GetObjectRetentionRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Retrieves an object's retention settings. +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // // Example sending a request using GetObjectRetentionRequest. // req := client.GetObjectRetentionRequest(params) diff --git a/service/s3/api_op_GetObjectTagging.go b/service/s3/api_op_GetObjectTagging.go index 645cdef789b..d12723716f8 100644 --- a/service/s3/api_op_GetObjectTagging.go +++ b/service/s3/api_op_GetObjectTagging.go @@ -13,12 +13,17 @@ import ( type GetObjectTaggingInput struct { _ struct{} `type:"structure"` + // The bucket name containing the object for which to get the tagging information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which to get the tagging information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -82,9 +87,12 @@ func (s GetObjectTaggingInput) MarshalFields(e protocol.FieldEncoder) error { type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []Tag `locationNameList:"Tag" type:"list" required:"true"` + // The versionId of the object for which you got the tagging information. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -121,7 +129,25 @@ const opGetObjectTagging = "GetObjectTagging" // GetObjectTaggingRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about current version +// of an object. 
For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging // // // Example sending a request using GetObjectTaggingRequest. // req := client.GetObjectTaggingRequest(params) diff --git a/service/s3/api_op_GetObjectTorrent.go b/service/s3/api_op_GetObjectTorrent.go index 6096b0dddd7..c308ba1d24c 100644 --- a/service/s3/api_op_GetObjectTorrent.go +++ b/service/s3/api_op_GetObjectTorrent.go @@ -14,9 +14,14 @@ import ( type GetObjectTorrentInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the object for which to get the torrent + // files. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The object key for which to get the information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -87,6 +92,7 @@ func (s GetObjectTorrentInput) MarshalFields(e protocol.FieldEncoder) error { type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` + // A Bencoded dictionary as defined by the BitTorrent specification Body io.ReadCloser `type:"blob"` // If present, indicates that the requester was successfully charged for the @@ -116,7 +122,19 @@ const opGetObjectTorrent = "GetObjectTorrent" // GetObjectTorrentRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Return torrent files from a bucket. +// Return torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size and +// that are not encrypted using server-side encryption with customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject // // // Example sending a request using GetObjectTorrentRequest. // req := client.GetObjectTorrentRequest(params) diff --git a/service/s3/api_op_GetPublicAccessBlock.go b/service/s3/api_op_GetPublicAccessBlock.go index 35028afa008..5a5c961ade2 100644 --- a/service/s3/api_op_GetPublicAccessBlock.go +++ b/service/s3/api_op_GetPublicAccessBlock.go @@ -87,7 +87,30 @@ const opGetPublicAccessBlock = "GetPublicAccessBlock" // GetPublicAccessBlockRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. In +// order to use this operation, you must have the s3:GetBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
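Since the GetObjectTagging behavior (GET against the tagging subresource, optional versionId, s3:GetObjectTagging permission) is spelled out a few hunks above, here is a minimal, hedged sketch of that call before the public-access-block documentation continues; the bucket and key are placeholders, not values from this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	// Without VersionId this returns the tags of the current object version;
	// supply VersionId (and s3:GetObjectVersionTagging permission) for older versions.
	req := client.GetObjectTaggingRequest(&s3.GetObjectTaggingInput{
		Bucket: aws.String("examplebucket"),                   // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"), // placeholder
	})
	resp, err := req.Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	for _, t := range resp.TagSet {
		fmt.Printf("%s=%s\n", *t.Key, *t.Value)
	}
	if resp.VersionId != nil {
		fmt.Println("version:", *resp.VersionId)
	}
}
```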
+// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock // // // Example sending a request using GetPublicAccessBlockRequest. // req := client.GetPublicAccessBlockRequest(params) diff --git a/service/s3/api_op_HeadBucket.go b/service/s3/api_op_HeadBucket.go index ba14694acc3..091e01d545e 100644 --- a/service/s3/api_op_HeadBucket.go +++ b/service/s3/api_op_HeadBucket.go @@ -14,6 +14,8 @@ import ( type HeadBucketInput struct { _ struct{} `type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -76,7 +78,15 @@ const opHeadBucket = "HeadBucket" // Amazon Simple Storage Service. // // This operation is useful to determine if a bucket exists and you have permission -// to access it. +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // // Example sending a request using HeadBucketRequest. // req := client.HeadBucketRequest(params) diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index f9b36a12edd..f7e9f16c89b 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -14,6 +14,8 @@ import ( type HeadObjectInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -33,6 +35,8 @@ type HeadObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // The object key. 
+ // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -200,6 +204,7 @@ func (s HeadObjectInput) MarshalFields(e protocol.FieldEncoder) error { type HeadObjectOutput struct { _ struct{} `type:"structure"` + // Indicates that a range of bytes was specifed. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Specifies caching behavior along the request/reply chain. @@ -251,26 +256,69 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // The Legal Hold status for the specified object. + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission. + // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // The object lock mode currently in place for this object. + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockMode ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // The date and time when this object's object lock expires. + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or destination in a replication rule. + // + // In replication you have a source bucket on which you configure replication + // and destination bucket where Amazon S3 stores object replicas. When you request + // an object (GetObject) or object metadata (HeadObject) from these buckets, + // Amazon S3 will return the x-amz-replication-status header in the response + // as follows: + // + // * If requesting object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if object in your request is eligible + // for replication. For example, suppose in your replication configuration + // you specify object prefix "TaxDocs" requesting Amazon S3 to replicate + // objects with key prefix "TaxDocs". Then any objects you upload with this + // key name prefix, for example "TaxDocs/document1.pdf", is eligible for + // replication. For any object request with this key name prefix Amazon S3 + // will return the x-amz-replication-status header with value PENDING, COMPLETED + // or FAILED indicating object replication status. 
+ // + // * If requesting object from the destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if object + // in your request is a replica that Amazon S3 created. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus ReplicationStatus `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged RequestCharged `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"true"` - // Provides information about object restoration operation and expiration time - // of the restored object copy. + // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, @@ -283,14 +331,20 @@ type HeadObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the Server-side encryption algorithm + // used when storing this object in S3 (e.g., AES256, aws:kms). ServerSideEncryption ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass StorageClass `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"true"` // Version of the object. @@ -497,6 +551,63 @@ const opHeadObject = "HeadObject" // object itself. This operation is useful if you're only interested in an object's // metadata. 
To use HEAD, you must have READ access to the object. // +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers: +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for GET requests if your object uses server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, you’ll +// get an HTTP 400 BadRequest error. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Consider the following when using request headers: +// +// * Consideration 1 – If both of the If-Match and If-Unmodified-Since +// headers are present in the request as follows: If-Match condition evaluates +// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon +// S3 returns 200 OK and the data requested. +// +// * Consideration 2 – If both of the If-None-Match and If-Modified-Since +// headers are present in the request as follows: If-None-Match condition +// evaluates to false, and; If-Modified-Since condition evaluates to true; +// Then Amazon S3 returns the 304 Not Modified response code. +// +// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// Permissions +// +// You need the s3:GetObject permission for this operation. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// If the object you request does not exist, the error Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// +// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will +// return a HTTP status code 404 ("no such key") error. +// +// * If you don’t have the s3:ListBucket permission, Amazon S3 will return +// a HTTP status code 403 ("access denied") error. +// +// The following operation is related to HeadObject: +// +// * GetObject +// // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses // for more information on returned errors. // diff --git a/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/service/s3/api_op_ListBucketAnalyticsConfigurations.go index e75648e626f..42604a12896 100644 --- a/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -73,7 +73,8 @@ type ListBucketAnalyticsConfigurationsOutput struct { // The list of analytics configurations for a bucket. 
AnalyticsConfigurationList []AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The ContinuationToken that represents where this request began. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. ContinuationToken *string `type:"string"` // Indicates whether the returned list of analytics configurations is complete. @@ -132,7 +133,33 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" // ListBucketAnalyticsConfigurationsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Lists the analytics configurations for the bucket. +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated +// is set to false. If there are more configurations to list, IsTruncated is +// set to true, and there will be a value in NextContinuationToken. You use +// the NextContinuationToken value to continue the pagination of the list by +// passing the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// * GetBucketAnalyticsConfiguration +// +// * DeleteBucketAnalyticsConfiguration +// +// * PutBucketAnalyticsConfiguration // // // Example sending a request using ListBucketAnalyticsConfigurationsRequest. // req := client.ListBucketAnalyticsConfigurationsRequest(params) diff --git a/service/s3/api_op_ListBucketInventoryConfigurations.go b/service/s3/api_op_ListBucketInventoryConfigurations.go index 9c71bd6defe..fc646f8ff35 100644 --- a/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -79,8 +79,9 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Indicates whether the returned list of inventory configurations is truncated - // in this response. A value of true indicates that the list is truncated. + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. 
IsTruncated *bool `type:"boolean"` // The marker used to continue this inventory configuration listing. Use the @@ -134,7 +135,33 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Returns a list of inventory configurations for the bucket. +// Returns a list of inventory configurations for the bucket. You can have up +// to 1,000 analytics configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration // // // Example sending a request using ListBucketInventoryConfigurationsRequest. // req := client.ListBucketInventoryConfigurationsRequest(params) diff --git a/service/s3/api_op_ListBucketMetricsConfigurations.go b/service/s3/api_op_ListBucketMetricsConfigurations.go index 19f645db20d..ceb612e36f9 100644 --- a/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -136,7 +136,34 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Lists the metrics configurations for the bucket. +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. 
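The ListBucket*Configurations operations above all describe the same continuation-token paging contract (check IsTruncated, then pass NextContinuationToken back as continuation-token). As a hedged sketch only, here is how that loop might look for the metrics listing; the bucket name is a placeholder, and the MetricsConfigurationList and Id field names are assumptions that mirror the analytics and inventory listings shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	var token *string
	for {
		req := client.ListBucketMetricsConfigurationsRequest(&s3.ListBucketMetricsConfigurationsInput{
			Bucket:            aws.String("examplebucket"), // placeholder
			ContinuationToken: token,
		})
		resp, err := req.Send(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range resp.MetricsConfigurationList {
			fmt.Println("configuration:", *c.Id)
		}
		// IsTruncated=true means another page exists; NextContinuationToken is
		// passed back as continuation-token to fetch it.
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		token = resp.NextContinuationToken
	}
}
```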
+// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration // // // Example sending a request using ListBucketMetricsConfigurationsRequest. // req := client.ListBucketMetricsConfigurationsRequest(params) diff --git a/service/s3/api_op_ListBuckets.go b/service/s3/api_op_ListBuckets.go index 785fe573099..a39cb108908 100644 --- a/service/s3/api_op_ListBuckets.go +++ b/service/s3/api_op_ListBuckets.go @@ -28,8 +28,10 @@ func (s ListBucketsInput) MarshalFields(e protocol.FieldEncoder) error { type ListBucketsOutput struct { _ struct{} `type:"structure"` + // The list of buckets owned by the requestor. Buckets []Bucket `locationNameList:"Bucket" type:"list"` + // The owner of the buckets listed. Owner *Owner `type:"structure"` } diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index 628a7801945..9121fe5e8a7 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -13,10 +13,19 @@ import ( type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -29,6 +38,13 @@ type ListMultipartUploadsInput struct { // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. 
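Before the remaining multipart-upload listing parameters below, here is a minimal, hedged sketch of the ListBuckets call whose output fields (Buckets and Owner) are documented a few lines above; it is illustrative only and follows the same request/response pattern as the generated examples.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	// ListBuckets takes no parameters and returns the buckets owned by the requester.
	resp, err := client.ListBucketsRequest(&s3.ListBucketsInput{}).Send(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	if resp.Owner != nil && resp.Owner.DisplayName != nil {
		fmt.Println("owner:", *resp.Owner.DisplayName)
	}
	for _, b := range resp.Buckets {
		fmt.Println("bucket:", *b.Name)
	}
}
```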
KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -37,12 +53,16 @@ type ListMultipartUploadsInput struct { MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` // Lists in-progress uploads only for those keys that begin with the specified - // prefix. + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -126,11 +146,22 @@ type ListMultipartUploadsOutput struct { // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. CommonPrefixes []CommonPrefix `type:"list" flattened:"true"` + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType EncodingType `type:"string" enum:"true"` // Indicates whether the returned list of multipart uploads is truncated. A @@ -161,6 +192,8 @@ type ListMultipartUploadsOutput struct { // Upload ID after which listing began. UploadIdMarker *string `type:"string"` + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. Uploads []MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } @@ -270,7 +303,40 @@ const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. +// This operation lists in-progress multipart uploads. An in-progress multipart +// upload is a multipart upload that has been initiated using the Initiate Multipart +// Upload request, but has not yet been completed or aborted. +// +// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// multipart uploads is the maximum number of uploads a response can include, +// which is also the default value. You can further limit the number of uploads +// in a response by specifying the max-uploads parameter in the response. If +// additional multipart uploads satisfy the list criteria, the response will +// contain an IsTruncated element with the value true. 
To list the additional +// multipart uploads, use the key-marker and upload-id-marker request parameters. +// +// In the response, the uploads are sorted by key. If your application has initiated +// more than one multipart upload using the same object key, then uploads in +// the response are first sorted by key. Additionally, uploads are sorted in +// ascending order within each key by the upload initiation time. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // // Example sending a request using ListMultipartUploadsRequest. // req := client.ListMultipartUploadsRequest(params) diff --git a/service/s3/api_op_ListObjectVersions.go b/service/s3/api_op_ListObjectVersions.go index 0222450c4d2..6b9effc1825 100644 --- a/service/s3/api_op_ListObjectVersions.go +++ b/service/s3/api_op_ListObjectVersions.go @@ -13,10 +13,16 @@ import ( type ListObjectVersionsInput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -31,10 +37,17 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // contain fewer keys but will never contain more. If additional keys satisfy + // the search criteria, but were not returned because max-keys was exceeded, + // the response contains true. To return the additional + // keys, see key-marker and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - // Limits the response to keys that begin with the specified prefix. + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Specifies the object version you want to start listing from. 
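The ListMultipartUploads documentation above describes paging with key-marker and upload-id-marker once IsTruncated is returned. A hedged sketch of that loop follows; the bucket name is a placeholder, and the NextKeyMarker/NextUploadIdMarker output fields are assumed to be present alongside the fields shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	input := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String("examplebucket"), // placeholder
		MaxUploads: aws.Int64(100),
	}
	for {
		resp, err := client.ListMultipartUploadsRequest(input).Send(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range resp.Uploads {
			fmt.Println("in-progress upload:", *u.Key, *u.UploadId)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		// Resume after the last key and upload ID returned on this page.
		input.KeyMarker = resp.NextKeyMarker
		input.UploadIdMarker = resp.NextUploadIdMarker
	}
}
```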
@@ -118,13 +131,28 @@ func (s ListObjectVersionsInput) MarshalFields(e protocol.FieldEncoder) error {
 type ListObjectVersionsOutput struct {
 	_ struct{} `type:"structure"`
 
+	// All of the keys rolled up into a common prefix count as a single return when
+	// calculating the number of returns.
 	CommonPrefixes []CommonPrefix `type:"list" flattened:"true"`
 
+	// Container for an object that is a delete marker.
 	DeleteMarkers []DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
 
+	// The delimiter grouping the included keys. A delimiter is a character that
+	// you specify to group keys. All keys that contain the same string between
+	// the prefix and the first occurrence of the delimiter are grouped under a
+	// single result element in CommonPrefixes. These groups are counted as one
+	// result against the max-keys limitation. These keys are not returned elsewhere
+	// in the response.
 	Delimiter *string `type:"string"`
 
-	// Encoding type used by Amazon S3 to encode object keys in the response.
+	// Encoding type used by Amazon S3 to encode object key names in the XML response.
+	//
+	// If you specify encoding-type request parameter, Amazon S3 includes this element
+	// in the response, and returns encoded key name values in the following response
+	// elements:
+	//
+	// KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
 	EncodingType EncodingType `type:"string" enum:"true"`
 
 	// A flag that indicates whether or not Amazon S3 returned all of the results
@@ -137,20 +165,30 @@ type ListObjectVersionsOutput struct {
 	// Marks the last Key returned in a truncated response.
 	KeyMarker *string `type:"string"`
 
+	// Specifies the maximum number of objects to return.
 	MaxKeys *int64 `type:"integer"`
 
+	// The bucket name.
 	Name *string `type:"string"`
 
-	// Use this value for the key marker request parameter in a subsequent request.
+	// When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+	// specifies the first key not returned that satisfies the search criteria.
+	// Use this value for the key-marker request parameter in a subsequent request.
 	NextKeyMarker *string `type:"string"`
 
-	// Use this value for the next version id marker parameter in a subsequent request.
+	// When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker
+	// specifies the first object version not returned that satisfies the search
+	// criteria. Use this value for the version-id-marker request parameter in a
+	// subsequent request.
 	NextVersionIdMarker *string `type:"string"`
 
+	// Selects objects that start with the value supplied by this parameter.
 	Prefix *string `type:"string"`
 
+	// Marks the last version of the Key returned in a truncated response.
 	VersionIdMarker *string `type:"string"`
 
+	// Container for version information.
 	Versions []ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
 }
 
@@ -265,7 +303,24 @@ const opListObjectVersions = "ListObjectVersions"
 
 // ListObjectVersionsRequest returns a request value for making API operation for
 // Amazon Simple Storage Service.
 //
-// Returns metadata about all of the versions of objects in a bucket.
+// Returns metadata about all of the versions of objects in a bucket. You can
+// also use request parameters as selection criteria to return metadata about
+// a subset of all the object versions.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
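To make the ListObjectVersions paging just documented concrete (NextKeyMarker and NextVersionIdMarker feeding the key-marker and version-id-marker parameters), here is a minimal, hedged sketch; the bucket and prefix are placeholders, not values from this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)

	input := &s3.ListObjectVersionsInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Prefix: aws.String("photos/"),       // placeholder
	}
	for {
		resp, err := client.ListObjectVersionsRequest(input).Send(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		for _, v := range resp.Versions {
			fmt.Println("version:", *v.Key, *v.VersionId)
		}
		for _, d := range resp.DeleteMarkers {
			fmt.Println("delete marker:", *d.Key, *d.VersionId)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		// Resume from the first key and version not returned on this page.
		input.KeyMarker = resp.NextKeyMarker
		input.VersionIdMarker = resp.NextVersionIdMarker
	}
}
```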
+// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * DeleteObject // // // Example sending a request using ListObjectVersionsRequest. // req := client.ListObjectVersionsRequest(params) diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 18a86a50e33..1d74f137e5c 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -13,6 +13,8 @@ import ( type ListObjectsInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -120,10 +122,31 @@ func (s ListObjectsInput) MarshalFields(e protocol.FieldEncoder) error { type ListObjectsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []CommonPrefix `type:"list" flattened:"true"` + // Metadata about each object returned. Contents []Object `type:"list" flattened:"true"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. @@ -133,10 +156,14 @@ type ListObjectsOutput struct { // that satisfied the search criteria. IsTruncated *bool `type:"boolean"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. Marker *string `type:"string"` + // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` + // Name of the bucket. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response @@ -148,6 +175,7 @@ type ListObjectsOutput struct { // subsequent request to get the next set of object keys. NextMarker *string `type:"string"` + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` } @@ -240,7 +268,25 @@ const opListObjects = "ListObjects" // // Returns some or all (up to 1000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. 
We recommend that you use the newer version, ListObjectsV2, +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * CreateBucket +// +// * ListBuckets // // // Example sending a request using ListObjectsRequest. // req := client.ListObjectsRequest(params) diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index b32cb0db506..9925e007f20 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -20,7 +20,7 @@ type ListObjectsV2Input struct { // ContinuationToken indicates Amazon S3 that the list is being continued on // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character you use to group keys. @@ -47,7 +47,7 @@ type ListObjectsV2Input struct { RequestPayer RequestPayer `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"true"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // listing after this specified key. StartAfter can be any key in the bucket. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -140,26 +140,48 @@ func (s ListObjectsV2Input) MarshalFields(e protocol.FieldEncoder) error { type ListObjectsV2Output struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []Object `type:"list" flattened:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // If ContinuationToken was sent with the request, it is included in the response. ContinuationToken *string `type:"string"` - // A delimiter is a character you use to group keys. + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. 
+	//
+	// If you specify the encoding-type request parameter, Amazon S3 includes this
+	// element in the response, and returns encoded key name values in the following
+	// response elements:
+	//
+	// Delimiter, Prefix, Key, and StartAfter.
 	EncodingType EncodingType `type:"string" enum:"true"`
 
-	// A flag that indicates whether or not Amazon S3 returned all of the results
-	// that satisfied the search criteria.
+	// Set to false if all of the results were returned. Set to true if more keys
+	// are available to return. If the number of results exceeds that specified
+	// by MaxKeys, all of the results might not be returned.
 	IsTruncated *bool `type:"boolean"`
 
 	// KeyCount is the number of keys returned with this request. KeyCount will
@@ -171,7 +193,7 @@ type ListObjectsV2Output struct {
 	// contain fewer keys but will never contain more.
 	MaxKeys *int64 `type:"integer"`
 
-	// Name of the bucket to list.
+	// Name of the bucket.
 	Name *string `type:"string"`
 
 	// NextContinuationToken is sent when isTruncated is true which means there
@@ -180,11 +202,10 @@ type ListObjectsV2Output struct {
 	// is obfuscated and is not a real key
 	NextContinuationToken *string `type:"string"`
 
-	// Limits the response to keys that begin with the specified prefix.
+	// Keys that begin with the indicated prefix.
 	Prefix *string `type:"string"`
 
-	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
-	// listing after this specified key. StartAfter can be any key in the bucket
+	// If StartAfter was sent with the request, it is included in the response.
 	StartAfter *string `type:"string"`
 }
 
@@ -287,10 +308,34 @@ const opListObjectsV2 = "ListObjectsV2"
 
 // ListObjectsV2Request returns a request value for making API operation for
 // Amazon Simple Storage Service.
 //
-// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use
 // the request parameters as selection criteria to return a subset of the objects
-// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
-// you use this revised API for new application development.
+// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure
+// to design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// To use this operation in an AWS Identity and Access Management (IAM) policy,
+// you must have permissions to perform the s3:ListBucket action. The bucket
+// owner has this permission by default and can grant this permission to others.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// This section describes the latest revision of the API. We recommend that
+// you use this revised API for application development. For backward compatibility,
+// Amazon S3 continues to support the prior version of this API, ListObjects.
+//
+// To get a list of your buckets, see ListBuckets.
+//
+// The following operations are related to ListObjectsV2:
+//
+//    * GetObject
+//
+//    * PutObject
+//
+//    * CreateBucket
 //
 //
 //    Example sending a request using ListObjectsV2Request.
// req := client.ListObjectsV2Request(params)
diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go
index 7aa6374e09b..8e6f1f17a17 100644
--- a/service/s3/api_op_ListParts.go
+++ b/service/s3/api_op_ListParts.go
@@ -14,9 +14,13 @@ import (
 type ListPartsInput struct {
 _ struct{} `type:"structure"`

+ // Name of the bucket to which the parts are being uploaded.
+ //
 // Bucket is a required field
 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

+ // Object key for which the multipart upload was initiated.
+ //
 // Key is a required field
 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

@@ -121,20 +125,34 @@ func (s ListPartsInput) MarshalFields(e protocol.FieldEncoder) error {
 type ListPartsOutput struct {
 _ struct{} `type:"structure"`

- // Date when multipart upload will become eligible for abort operation by lifecycle.
+ // If the bucket has a lifecycle rule configured with an action to abort incomplete
+ // multipart uploads and the prefix in the lifecycle rule matches the object
+ // name in the request, then the response includes this header indicating when
+ // the initiated multipart upload will become eligible for abort operation.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`

- // Id of the lifecycle rule that makes a multipart upload eligible for abort
- // operation.
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
 AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`

 // Name of the bucket to which the multipart upload was initiated.
 Bucket *string `type:"string"`

- // Identifies who initiated the multipart upload.
+ // Container element that identifies who initiated the multipart upload. If
+ // the initiator is an AWS account, this element provides the same information
+ // as the Owner element. If the initiator is an IAM User, then this element
+ // provides the user ARN and display name.
 Initiator *Initiator `type:"structure"`

- // Indicates whether the returned list of parts is truncated.
+ // Indicates whether the returned list of parts is truncated. A true value indicates
+ // that the list was truncated. A list can be truncated if the number of parts
+ // exceeds the limit returned in the MaxParts element.
 IsTruncated *bool `type:"boolean"`

 // Object key for which the multipart upload was initiated.
@@ -148,18 +166,26 @@ type ListPartsOutput struct {
 // in a subsequent request.
 NextPartNumberMarker *int64 `type:"integer"`

+ // Container element that identifies the object owner, after the object is created.
+ // If multipart upload is initiated by an IAM user, this element provides the
+ // parent account ID and display name.
 Owner *Owner `type:"structure"`

- // Part number after which listing begins.
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
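To make the ListObjectsV2 pagination contract documented above concrete (MaxKeys caps a page, IsTruncated signals more keys, and NextContinuationToken is fed back as ContinuationToken), here is a minimal sketch using the Request/Send pattern shown in the generated example comments. The external.LoadDefaultAWSConfig helper, the s3.New constructor, and the aws.String/aws.Int64 setters are assumed from this SDK release rather than taken from the diff, and the bucket name is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// "my-bucket" is a placeholder. MaxKeys caps each page of results.
	input := &s3.ListObjectsV2Input{
		Bucket:  aws.String("my-bucket"),
		MaxKeys: aws.Int64(100),
	}
	for {
		req := svc.ListObjectsV2Request(input)
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range resp.Contents {
			if obj.Key != nil {
				fmt.Println(*obj.Key)
			}
		}
		// IsTruncated means more keys are available; resume the listing by
		// feeding NextContinuationToken back as ContinuationToken.
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		input.ContinuationToken = resp.NextContinuationToken
	}
}
```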
PartNumberMarker *int64 `type:"integer"` + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []Part `locationName:"Part" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged RequestCharged `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"true"` - // The class of storage used to store the object. + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. StorageClass StorageClass `type:"string" enum:"true"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -280,6 +306,33 @@ const opListParts = "ListParts" // Amazon Simple Storage Service. // // Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload). This request +// returns a maximum of 1,000 uploaded parts. The default number of parts returned +// is 1,000 parts. You can restrict the number of parts returned by specifying +// the max-parts request parameter. If your multipart upload consists of more +// than 1,000 parts, the response returns an IsTruncated field with the value +// of true, and a NextPartNumberMarker element. In subsequent ListParts requests +// you can include the part-number-marker query string parameter and set its +// value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListMultipartUploads // // // Example sending a request using ListPartsRequest. // req := client.ListPartsRequest(params) diff --git a/service/s3/api_op_PutBucketAccelerateConfiguration.go b/service/s3/api_op_PutBucketAccelerateConfiguration.go index afdcd157607..0c62202429c 100644 --- a/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -14,7 +14,7 @@ import ( type PutBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure" payload:"AccelerateConfiguration"` - // Specifies the Accelerate Configuration you want to set for the bucket. + // Container for setting the transfer acceleration state. // // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -92,7 +92,41 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Sets the accelerate configuration of an existing bucket. +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. 
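The ListParts behavior described above follows the same pattern: MaxParts limits the page size, and when IsTruncated is true the NextPartNumberMarker value is passed back as the part-number-marker parameter. A minimal sketch under the same assumptions as the earlier ListObjectsV2 sketch; the bucket, key, and upload ID are placeholders (the upload ID would come from CreateMultipartUpload).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Placeholders: the upload ID is returned by CreateMultipartUpload.
	input := &s3.ListPartsInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-object"),
		UploadId: aws.String("example-upload-id"),
		MaxParts: aws.Int64(100),
	}
	for {
		req := svc.ListPartsRequest(input)
		resp, err := req.Send(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range resp.Parts {
			fmt.Printf("part %d, etag %s\n", *p.PartNumber, *p.ETag)
		}
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			break
		}
		// Resume listing after the last part returned on this page.
		input.PartNumberMarker = resp.NextPartNumberMarker
	}
}
```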
+// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration operation returns the transfer acceleration +// state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration +// +// * CreateBucket // // // Example sending a request using PutBucketAccelerateConfigurationRequest. // req := client.PutBucketAccelerateConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketAcl.go b/service/s3/api_op_PutBucketAcl.go index 1af44354545..fce1e8026b5 100644 --- a/service/s3/api_op_PutBucketAcl.go +++ b/service/s3/api_op_PutBucketAcl.go @@ -20,6 +20,8 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket to which to apply the ACL. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -144,7 +146,80 @@ const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Sets the permissions on a bucket using access control lists (ACL). +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. 
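For the PutBucketAccelerateConfiguration operation above, a minimal sketch that sets the Transfer Acceleration state to Enabled. The bucket name is a placeholder, and the BucketAccelerateStatusEnabled constant is assumed to be the generated enum value for "Enabled" in this release.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// The bucket name must be DNS-compliant and must not contain periods.
	req := svc.PutBucketAccelerateConfigurationRequest(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("my-bucket"),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			// Use BucketAccelerateStatusSuspended to disable accelerated transfers.
			Status: s3.BucketAccelerateStatusEnabled,
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```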
+// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers you specify explicit access permissions and grantees (AWS +// accounts or a Amazon S3 groups) who will receive the permission. If you +// use these ACL specific headers, you cannot use x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-write header grants create, +// overwrite, and delete objects permission to LogDelivery group predefined +// by Amazon S3 and two AWS accounts identified by their email addresses. +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetObjectAcl // // // Example sending a request using PutBucketAclRequest. // req := client.PutBucketAclRequest(params) diff --git a/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 38bc5a11c76..dd4e98a52da 100644 --- a/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -113,7 +113,50 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // Amazon Simple Storage Service. // // Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports to +// a comma-separated values (CSV) flat file, see the DataExport request element. 
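Of the two PutBucketAcl approaches described above, the simpler one is a canned ACL, which corresponds to the x-amz-acl header. A minimal sketch; the bucket name is a placeholder and the BucketCannedACLPrivate constant is assumed to be the generated enum value for the "private" canned ACL.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// ACL maps to the x-amz-acl header; a canned ACL cannot be combined with
	// the explicit x-amz-grant-* style permissions.
	req := svc.PutBucketAclRequest(&s3.PutBucketAclInput{
		Bucket: aws.String("my-bucket"),
		ACL:    s3.BucketCannedACLPrivate,
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```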
+// Reports are updated daily and are based on the object filters you configure. +// When selecting data export you specify a destination bucket and optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Special Errors +// +// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// Related Resources +// +// * +// +// * +// +// * // // // Example sending a request using PutBucketAnalyticsConfigurationRequest. // req := client.PutBucketAnalyticsConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketCors.go b/service/s3/api_op_PutBucketCors.go index 87f605b4e21..154474d8b6e 100644 --- a/service/s3/api_op_PutBucketCors.go +++ b/service/s3/api_op_PutBucketCors.go @@ -14,12 +14,14 @@ import ( type PutBucketCorsInput struct { _ struct{} `type:"structure" payload:"CORSConfiguration"` + // Specifies the bucket impacted by the corsconfiguration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // (https://docs.aws.amazon.com/AmazonS3/latest/dev//cors.html) in the Amazon // Simple Storage Service Developer Guide. // // CORSConfiguration is a required field @@ -98,7 +100,49 @@ const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Sets the CORS configuration for a bucket. +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. 
+// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// * The request's Origin header must match AllowedOrigin elements. +// +// * The request method (for example, GET, PUT, HEAD and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// * Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketCors +// +// * DeleteBucketCors +// +// * RESTOPTIONSobject // // // Example sending a request using PutBucketCorsRequest. // req := client.PutBucketCorsRequest(params) diff --git a/service/s3/api_op_PutBucketEncryption.go b/service/s3/api_op_PutBucketEncryption.go index d93e2456a6b..d405b3282e8 100644 --- a/service/s3/api_op_PutBucketEncryption.go +++ b/service/s3/api_op_PutBucketEncryption.go @@ -15,9 +15,9 @@ type PutBucketEncryptionInput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field @@ -101,8 +101,32 @@ const opPutBucketEncryption = "PutBucketEncryption" // PutBucketEncryptionRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Creates a new server-side encryption configuration (or replaces an existing -// one, if present). +// This implementation of the PUT operation uses the encryption subresource +// to set the default encryption state of an existing bucket. 
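Returning to the PutBucketCors operation above, here is a minimal sketch with one rule built from the elements the text names (AllowedOrigin, AllowedMethod, AllowedHeader). The values are placeholders, and the CORSConfiguration/CORSRule shapes are assumed to mirror the XML elements described.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// One rule allowing browser GET/PUT requests whose Origin is http://www.example.com.
	req := svc.PutBucketCorsRequest(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"),
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []s3.CORSRule{{
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```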
+//
+// This implementation of the PUT operation sets default encryption for a bucket
+// using server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS
+// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3
+// default encryption feature, see Amazon S3 Default Bucket Encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in
+// the Amazon Simple Storage Service Developer Guide.
+//
+// As a security precaution, the root user of the AWS account that owns a bucket
+// can always use this operation, even if the policy explicitly denies the root
+// user the ability to perform this action.
+//
+// This operation requires AWS Signature Version 4. For more information, see
+// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html).
+//
+// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * GetBucketEncryption
+//
+// * DeleteBucketEncryption
 //
 //
 // Example sending a request using PutBucketEncryptionRequest.
 // req := client.PutBucketEncryptionRequest(params)
diff --git a/service/s3/api_op_PutBucketInventoryConfiguration.go b/service/s3/api_op_PutBucketInventoryConfiguration.go
index 0fd5fe69a8c..f571ac5b00d 100644
--- a/service/s3/api_op_PutBucketInventoryConfiguration.go
+++ b/service/s3/api_op_PutBucketInventoryConfiguration.go
@@ -112,8 +112,54 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
 // PutBucketInventoryConfigurationRequest returns a request value for making API operation for
 // Amazon Simple Storage Service.
 //
-// Adds an inventory configuration (identified by the inventory ID) from the
-// bucket.
+// This implementation of the PUT operation adds an inventory configuration
+// (identified by the inventory ID) to the bucket. You can have up to 1,000
+// inventory configurations per bucket.
+//
+// Amazon S3 inventory generates inventories of the objects in the bucket on
+// a daily or weekly basis, and the results are published to a flat file. The
+// bucket that is inventoried is called the source bucket, and the bucket where
+// the inventory flat file is stored is called the destination bucket. The destination
+// bucket must be in the same AWS Region as the source bucket.
+//
+// When you configure an inventory for a source bucket, you specify the destination
+// bucket where you want the inventory to be stored, and whether to generate
+// the inventory daily or weekly. You can also configure what object metadata
+// to include and whether to inventory all object versions or only current versions.
+// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev//storage-inventory.html)
+// in the Amazon Simple Storage Service Developer Guide.
+//
+// You must create a bucket policy on the destination bucket to grant permissions
+// to Amazon S3 to write objects to the bucket in the defined location. For
+// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage
+// Class Analysis.
(https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket +// +// Related Resources +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations // // // Example sending a request using PutBucketInventoryConfigurationRequest. // req := client.PutBucketInventoryConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketLifecycle.go b/service/s3/api_op_PutBucketLifecycle.go index 59118f6ab12..d27cf09af14 100644 --- a/service/s3/api_op_PutBucketLifecycle.go +++ b/service/s3/api_op_PutBucketLifecycle.go @@ -17,6 +17,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1000 rules. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -88,7 +89,55 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // PutBucketLifecycleRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// No longer used, see the PutBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration. +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev//object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. 
Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev//intro-lifecycle-rules.html#lifecycle-configuration-examples). +// +// Related Resources +// +// * GetBucketLifecycle(Deprecated) +// +// * GetBucketLifecycleConfiguration +// +// * +// +// * By default, a resource owner—in this case, a bucket owner, which is +// the AWS account that created the bucket—can perform any of the operations. +// A resource owner can also grant others permission to perform the operation. +// For more information, see the following topics in the Amazon Simple Storage +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) // // // Example sending a request using PutBucketLifecycleRequest. // req := client.PutBucketLifecycleRequest(params) diff --git a/service/s3/api_op_PutBucketLifecycleConfiguration.go b/service/s3/api_op_PutBucketLifecycleConfiguration.go index 193a2a389ac..9099f9827f3 100644 --- a/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -14,12 +14,12 @@ import ( type PutBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + // The name of the bucket for which to set the configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. - // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) - // in the Amazon Simple Storage Service Developer Guide. + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -91,8 +91,69 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
+// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle. +// +// Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. Each rule +// consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration +// +// * DeleteBucketLifecycle // // // Example sending a request using PutBucketLifecycleConfigurationRequest. // req := client.PutBucketLifecycleConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketLogging.go b/service/s3/api_op_PutBucketLogging.go index 0974bbe2e6b..170d7e26402 100644 --- a/service/s3/api_op_PutBucketLogging.go +++ b/service/s3/api_op_PutBucketLogging.go @@ -14,9 +14,13 @@ import ( type PutBucketLoggingInput struct { _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + // The name of the bucket for which to set the logging parameters. 
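To make the rule structure described for PutBucketLifecycleConfiguration concrete (a Filter, a Status, and one or more actions), here is a minimal sketch with a single expiration rule. The rule ID, prefix, and day count are placeholders, and the LifecycleRuleFilter and ExpirationStatusEnabled names are assumed from the generated types of this release.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Expire objects under the logs/ prefix after 365 days.
	req := svc.PutBucketLifecycleConfigurationRequest(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Status:     s3.ExpirationStatusEnabled,
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```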
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for logging status information. + // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -94,9 +98,52 @@ const opPutBucketLogging = "PutBucketLogging" // Amazon Simple Storage Service. // // Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of +// can view and modify the logging parameters. All logs are saved to buckets +// in the same AWS Region as the source bucket. To set the logging status of // a bucket, you must be the bucket owner. // +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request. +// +// * By Email address: <>Grantees@email.com<> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// To enable logging, you use LoggingEnabled and its children request elements. +// To disable logging, you use an empty BucketLoggingStatus request element: +// +// +// +// For more information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// +// For more information about creating a bucket, see CreateBucket. For more +// information about returning the logging status of a bucket, see GetBucketLogging. +// +// The following operations are related to PutBucketLogging: +// +// * PutObject +// +// * DeleteBucket +// +// * CreateBucket +// +// * GetBucketLogging +// // // Example sending a request using PutBucketLoggingRequest. // req := client.PutBucketLoggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutBucketMetricsConfiguration.go b/service/s3/api_op_PutBucketMetricsConfiguration.go index 10df5c6fedd..8d357585fea 100644 --- a/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -113,7 +113,33 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // Amazon Simple Storage Service. // // Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
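For the PutBucketLogging behavior above, a minimal sketch that enables server access logging to a target bucket; as the text notes, sending an empty BucketLoggingStatus instead disables logging. The bucket names and prefix are placeholders, and the LoggingEnabled shape is assumed to mirror the request elements named above.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Deliver access logs for my-bucket into my-log-bucket under my-bucket-logs/.
	// An empty &s3.BucketLoggingStatus{} would turn logging off instead.
	req := svc.PutBucketLoggingRequest(&s3.PutBucketLoggingInput{
		Bucket: aws.String("my-bucket"),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("my-log-bucket"),
				TargetPrefix: aws.String("my-bucket-logs/"),
			},
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```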
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to PutBucketMetricsConfiguration: +// +// * DeleteBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// GetBucketLifecycle has the following special error: +// +// * Error code: TooManyConfigurations Description:You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request // // // Example sending a request using PutBucketMetricsConfigurationRequest. // req := client.PutBucketMetricsConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketNotification.go b/service/s3/api_op_PutBucketNotification.go index 023706a1ff7..ad89e91cfd7 100644 --- a/service/s3/api_op_PutBucketNotification.go +++ b/service/s3/api_op_PutBucketNotification.go @@ -14,9 +14,13 @@ import ( type PutBucketNotificationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The container for the configuration. + // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } diff --git a/service/s3/api_op_PutBucketNotificationConfiguration.go b/service/s3/api_op_PutBucketNotificationConfiguration.go index e2aae655c1c..3ee2e99dc92 100644 --- a/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -14,6 +14,8 @@ import ( type PutBucketNotificationConfigurationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -96,7 +98,55 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // PutBucketNotificationConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Enables notifications of specified events for a bucket. +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an +// event notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. 
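Because PutBucketMetricsConfiguration is a full replacement of the configuration stored under a given ID, the whole MetricsConfiguration is sent each time, as described above. A minimal sketch; the configuration ID is a placeholder, and the assumption that omitting a Filter applies the metrics to the entire bucket comes from the generated shapes, not from the text above.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Replaces any existing metrics configuration stored under the same ID.
	req := svc.PutBucketMetricsConfigurationRequest(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("my-bucket"),
		Id:     aws.String("EntireBucket"),
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("EntireBucket"), // no Filter: metrics cover all objects (assumed)
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```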
That is, the +// notification configuration will be an empty NotificationConfiguration. +// +// +// +// +// +// This operation replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon +// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon +// SQS) destination exists, and that the bucket owner has permission to publish +// to it by sending a test notification. In the case of AWS Lambda destinations, +// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission +// to invoke the function from the Amazon S3 bucket. For more information, see +// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to topic. +// +// The following operations is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration // // // Example sending a request using PutBucketNotificationConfigurationRequest. // req := client.PutBucketNotificationConfigurationRequest(params) diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index b3dfd36bc4e..be280209ef5 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -14,6 +14,8 @@ import ( type PutBucketPolicyInput struct { _ struct{} `type:"structure" payload:"Policy"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -100,7 +102,28 @@ const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. 
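The simplest concrete case of the PutBucketNotificationConfiguration text above is disabling notifications by sending the empty NotificationConfiguration element. A minimal sketch; the bucket name is a placeholder, and topic, queue, or Lambda configurations would be added to the same struct to enable notifications instead.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// An empty NotificationConfiguration replaces (and thereby disables) any
	// existing event notification configuration on the bucket.
	req := svc.PutBucketNotificationConfigurationRequest(&s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String("my-bucket"),
		NotificationConfiguration: &s3.NotificationConfiguration{},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```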
+// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// +// * DeleteBucket // // // Example sending a request using PutBucketPolicyRequest. // req := client.PutBucketPolicyRequest(params) diff --git a/service/s3/api_op_PutBucketReplication.go b/service/s3/api_op_PutBucketReplication.go index 04089908283..3d344f7ea80 100644 --- a/service/s3/api_op_PutBucketReplication.go +++ b/service/s3/api_op_PutBucketReplication.go @@ -14,6 +14,8 @@ import ( type PutBucketReplicationInput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + // The name of the bucket + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23,7 +25,6 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // A token that allows Amazon S3 object lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -106,9 +107,65 @@ const opPutBucketReplication = "PutBucketReplication" // Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. For more -// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket where you want +// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +// replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. All rules must specify +// the same destination bucket. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). 
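For the PutBucketPolicy operation documented above, a minimal sketch that attaches a policy document as a JSON string. The policy shown (anonymous read of objects) is purely illustrative, and the bucket name is a placeholder.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Illustrative policy only: allows anonymous GetObject on every key in my-bucket.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowPublicRead",
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::my-bucket/*"
	  }]
	}`
	req := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{
		Bucket: aws.String("my-bucket"),
		Policy: aws.String(policy),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```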
+// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// PutBucketReplication has the following special errors: +// +// * Error code: InvalidRequest Description: If the in +// has a value, the element must be specified. HTTP 400 +// +// * Error code: InvalidArgument Description: The element is empty. +// It must contain a valid account ID. HTTP 400 +// +// * Error code: InvalidArgument Description: The AWS account specified in +// the element must match the destination bucket owner. HTTP 400 +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication +// +// * DeleteBucketReplication +// // // Example sending a request using PutBucketReplicationRequest. // req := client.PutBucketReplicationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutBucketRequestPayment.go b/service/s3/api_op_PutBucketRequestPayment.go index ec8f1eaa604..7ef364b6492 100644 --- a/service/s3/api_op_PutBucketRequestPayment.go +++ b/service/s3/api_op_PutBucketRequestPayment.go @@ -14,9 +14,13 @@ import ( type PutBucketRequestPaymentInput struct { _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for Payer. + // // RequestPaymentConfiguration is a required field RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -96,8 +100,14 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // Sets the request payment configuration for a bucket. By default, the bucket // owner pays for downloads from the bucket. This configuration parameter enables // the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// +// * GetBucketRequestPayment // // // Example sending a request using PutBucketRequestPaymentRequest. 
// req := client.PutBucketRequestPaymentRequest(params) diff --git a/service/s3/api_op_PutBucketTagging.go b/service/s3/api_op_PutBucketTagging.go index 2f54d4db631..921131188ff 100644 --- a/service/s3/api_op_PutBucketTagging.go +++ b/service/s3/api_op_PutBucketTagging.go @@ -14,9 +14,13 @@ import ( type PutBucketTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the TagSet and Tag elements. + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -95,6 +99,47 @@ const opPutBucketTagging = "PutBucketTagging" // // Sets the tags for a bucket. // +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging +// +// * DeleteBucketTagging +// // // Example sending a request using PutBucketTaggingRequest. 
// req := client.PutBucketTaggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutBucketVersioning.go b/service/s3/api_op_PutBucketVersioning.go index 1acee999ef9..c06399131cb 100644 --- a/service/s3/api_op_PutBucketVersioning.go +++ b/service/s3/api_op_PutBucketVersioning.go @@ -14,6 +14,8 @@ import ( type PutBucketVersioningInput struct { _ struct{} `type:"structure" payload:"VersioningConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21,9 +23,7 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Describes the versioning state of an Amazon S3 bucket. For more information, - // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) - // in the Amazon Simple Storage Service API Reference. + // Container for setting the versioning state. // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -105,6 +105,38 @@ const opPutBucketVersioning = "PutBucketVersioning" // Sets the versioning state of an existing bucket. To set the versioning state, // you must be the bucket owner. // +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetBucketVersioning +// // // Example sending a request using PutBucketVersioningRequest. // req := client.PutBucketVersioningRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutBucketWebsite.go b/service/s3/api_op_PutBucketWebsite.go index 1267c6e2a47..6f3d8a10628 100644 --- a/service/s3/api_op_PutBucketWebsite.go +++ b/service/s3/api_op_PutBucketWebsite.go @@ -14,10 +14,12 @@ import ( type PutBucketWebsiteInput struct { _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + // The bucket name. 
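A minimal PutBucketVersioning sketch for the Enabled state described above. The bucket name is a placeholder, BucketVersioningStatusEnabled is assumed to be the generated constant for "Enabled", and if MFA Delete is enabled the MFA field (x-amz-mfa) and the MfaDelete element would also be required, as the text notes.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(cfg)

	// Switch to BucketVersioningStatusSuspended to give new objects the null version ID.
	req := svc.PutBucketVersioningRequest(&s3.PutBucketVersioningInput{
		Bucket: aws.String("my-bucket"),
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: s3.BucketVersioningStatusEnabled,
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```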
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies website configuration parameters for an Amazon S3 bucket. + // Container for the request. // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -95,7 +97,67 @@ const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Set the website configuration for a bucket. +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode // // // Example sending a request using PutBucketWebsiteRequest. // req := client.PutBucketWebsiteRequest(params) diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index 847d885dabe..6fde25f1ab7 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -15,7 +15,8 @@ import ( type PutObjectInput struct { _ struct{} `type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL ObjectCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` // Object data. @@ -26,33 +27,43 @@ type PutObjectInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. + // Can be used to specify caching behavior along the request/reply chain. 
For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -75,13 +86,14 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. Metadata map[string]string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. 
+ // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // The object lock mode that you want to apply to this object. + // The Object Lock mode that you want to apply to this object. ObjectLockMode ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // The date and time when you want this object's object lock to expire. + // The date and time when you want this object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -110,17 +122,22 @@ type PutObjectInput struct { // encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // If the x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // customer master key (CMK) that was used for the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the AWS KMS CMK that will be used for the object. If you specify + // x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, + // Amazon S3 uses the AWS managed CMK in AWS to protect the data. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). ServerSideEncryption ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, Standard is the default storage class. Amazon S3 supports + // other storage classes. StorageClass StorageClass `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"true"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -129,7 +146,22 @@ type PutObjectInput struct { // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see . 
+ // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -373,8 +405,10 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -396,12 +430,15 @@ type PutObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If the x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` // Version of the object. @@ -477,7 +514,169 @@ const opPutObject = "PutObject" // PutObjectRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Adds an object to a bucket. +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. 
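As a minimal illustration of the PutObject semantics described above (last writer wins, no built-in locking), the following hedged sketch uploads a small text object. The bucket, key, and body are placeholders, and the external.LoadDefaultAWSConfig helper and s3.New(cfg) constructor are assumed from the SDK's standard setup rather than shown in this diff:

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)

    // Write a small text object; if two writers race on the same key,
    // the last request processed wins, as noted above.
    req := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket:      aws.String("example-bucket"),
        Key:         aws.String("notes/hello.txt"),
        Body:        strings.NewReader("hello, world"),
        ContentType: aws.String("text/plain"),
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatalf("PutObject: %v", err)
    }
    if resp.ETag != nil {
        log.Printf("stored object ETag: %s", *resp.ETag)
    }
}

Supplying ContentMD5 on the input, as the next paragraph describes, additionally lets Amazon S3 verify the payload end to end.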
+// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// To configure your application to send the request headers before sending +// the request body, use the 100-continue HTTP status code. For PUT operations, +// this helps you avoid sending the message body if the message is rejected +// based on the headers (for example, because authentication fails or a redirect +// occurs). For more information on the 100-continue HTTP status code, see Section +// 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt (http://www.ietf.org/rfc/rfc2616.txt). +// +// You can optionally request server-side encryption. With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS-managed encryption keys. For more information, +// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// Access Permissions +// +// You can optionally specify the accounts or groups that should be granted +// specific permissions on the new object. There are two ways to grant the permissions +// using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS-managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (KMS) – If you want AWS to manage +// the keys used to encrypt data, specify the following headers in the request. +// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and +// PUT requests for an object protected by AWS KMS fail if you don't make +// them with SSL or by using SigV4. 
For more information on Server-Side Encryption +// with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key +// x-amz-server-side​-encryption​-customer-key-MD5 For more information +// on Server-Side Encryption with CMKs stored in KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the Access Control List (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) EU (Ireland) South America (São Paulo) For a list of all the +// Amazon S3 supported regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference id – if the value specified is the canonical +// user ID of an AWS account uri – if you are granting permissions to a +// predefined group For example, the following x-amz-grant-read header grants +// the AWS accounts identified by email addresses permissions to read object +// data and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. 
Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS-managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (KMS) – If you want AWS to manage +// the keys used to encrypt data, specify the following headers in the request. +// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side- encryption-aws-kms-key-id, Amazon +// S3 uses the default AWS KMS CMK to protect the data. All GET and PUT requests +// for an object protected by AWS KMS fail if you don't make them with SSL +// or by using SigV4. For more information on Server-Side Encryption with +// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// If you use this feature, the ETag value that Amazon S3 returns in the +// response is not the MD5 of the object. x-amz-server-side​-encryption​-customer-algorithm +// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 +// For more information on Server-Side Encryption with CMKs stored in AWS +// KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs +// stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the Standard storage class to store newly created +// objects. The Standard storage class provides high durability and high availability. +// You can specify other storage classes depending on the performance needs. +// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response using the x-amz-version-id response header. If versioning +// is suspended, Amazon S3 always uses null as the version ID for the object +// stored. For more information about returning the versioning state of a bucket, +// see GetBucketVersioning. If you enable versioning for a bucket, when Amazon +// S3 receives multiple write requests for the same object simultaneously, it +// stores all of the objects. +// +// Related Resources +// +// * CopyObject +// +// * DeleteObject // // // Example sending a request using PutObjectRequest. // req := client.PutObjectRequest(params) diff --git a/service/s3/api_op_PutObjectAcl.go b/service/s3/api_op_PutObjectAcl.go index 1e6ae45657f..d3929223aa8 100644 --- a/service/s3/api_op_PutObjectAcl.go +++ b/service/s3/api_op_PutObjectAcl.go @@ -13,12 +13,15 @@ import ( type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. 
For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) ACL ObjectCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The name of the bucket to which the ACL is being added. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -38,6 +41,8 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Key for which the PUT operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -191,7 +196,72 @@ const opPutObjectAcl = "PutObjectAcl" // Amazon Simple Storage Service. // // uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket +// for an object that already exists in a bucket. You must have WRITE_ACP permission +// to set the ACL of an object. +// +// Depending on your application needs, you may choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, then you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers you specify explicit access permissions and grantees (AWS +// accounts or a Amazon S3 groups) who will receive the permission. If you +// use these ACL specific headers, you cannot use x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants list objects +// permission to the two AWS accounts identified by their email addresses. +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. 
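A hedged sketch of the canned-ACL path just described, which sets x-amz-acl and therefore no x-amz-grant-* headers. The bucket and key are placeholders, the ObjectCannedACLPublicRead constant name is assumed from the SDK's generated enum naming, and the configuration helper is assumed from the SDK's standard setup:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)

    // Apply the canned "public-read" ACL to an existing object.
    // Because ACL is set, no explicit x-amz-grant-* fields may be used.
    req := svc.PutObjectAclRequest(&s3.PutObjectAclInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("reports/2019.csv"),
        ACL:    s3.ObjectCannedACLPublicRead,
    })
    if _, err := req.Send(context.TODO()); err != nil {
        log.Fatalf("PutObjectAcl: %v", err)
    }
}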
+// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By the person's ID: <>ID<><>GranteesEmail<> +// DisplayName is optional and ignored in the request +// +// * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject +// +// * GetObject // // // Example sending a request using PutObjectAclRequest. // req := client.PutObjectAclRequest(params) diff --git a/service/s3/api_op_PutObjectLegalHold.go b/service/s3/api_op_PutObjectLegalHold.go index 05970bc4541..e01a496e6b8 100644 --- a/service/s3/api_op_PutObjectLegalHold.go +++ b/service/s3/api_op_PutObjectLegalHold.go @@ -137,6 +137,10 @@ const opPutObjectLegalHold = "PutObjectLegalHold" // // Applies a Legal Hold configuration to the specified object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // // Example sending a request using PutObjectLegalHoldRequest. // req := client.PutObjectLegalHoldRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutObjectLockConfiguration.go b/service/s3/api_op_PutObjectLockConfiguration.go index 43b9c046c11..33318234077 100644 --- a/service/s3/api_op_PutObjectLockConfiguration.go +++ b/service/s3/api_op_PutObjectLockConfiguration.go @@ -13,12 +13,12 @@ import ( type PutObjectLockConfigurationInput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - // The bucket whose object lock configuration you want to create or replace. + // The bucket whose Object Lock configuration you want to create or replace. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The object lock configuration that you want to apply to the specified bucket. + // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the @@ -27,7 +27,7 @@ type PutObjectLockConfigurationInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer RequestPayer `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"true"` - // A token to allow Amazon S3 object lock to be enabled for an existing bucket. + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -116,10 +116,17 @@ const opPutObjectLockConfiguration = "PutObjectLockConfiguration" // PutObjectLockConfigurationRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Places an object lock configuration on the specified bucket. 
The rule specified -// in the object lock configuration will be applied by default to every new +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. // +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time. +// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // // Example sending a request using PutObjectLockConfigurationRequest. // req := client.PutObjectLockConfigurationRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutObjectRetention.go b/service/s3/api_op_PutObjectRetention.go index 2f694117c23..b2acb6b0f81 100644 --- a/service/s3/api_op_PutObjectRetention.go +++ b/service/s3/api_op_PutObjectRetention.go @@ -19,7 +19,7 @@ type PutObjectRetentionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions.j + // Indicates whether this operation should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The key name for the object that you want to apply this Object Retention @@ -148,6 +148,10 @@ const opPutObjectRetention = "PutObjectRetention" // // Places an Object Retention configuration on an object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // // Example sending a request using PutObjectRetentionRequest. // req := client.PutObjectRetentionRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutObjectTagging.go b/service/s3/api_op_PutObjectTagging.go index b540f17623d..f7bf8c9eb5f 100644 --- a/service/s3/api_op_PutObjectTagging.go +++ b/service/s3/api_op_PutObjectTagging.go @@ -13,15 +13,22 @@ import ( type PutObjectTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` + // The bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Name of the tag. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Container for the TagSet and Tag elements + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The versionId of the object that the tag-set will be added to. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -100,6 +107,7 @@ func (s PutObjectTaggingInput) MarshalFields(e protocol.FieldEncoder) error { type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` + // The versionId of the object the tag-set was added to. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -126,6 +134,43 @@ const opPutObjectTagging = "PutObjectTagging" // // Sets the supplied tag-set to an object that already exists in a bucket // +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. 
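In SDK terms, that PUT against the tagging subresource looks roughly like the following hedged sketch; the object name, tag key, and tag value are placeholders, and the configuration helper is assumed from the SDK's standard setup:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)

    // Replace the tag set on an existing object (PUT ?tagging).
    req := svc.PutObjectTaggingRequest(&s3.PutObjectTaggingInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("reports/2019.csv"),
        Tagging: &s3.Tagging{
            TagSet: []s3.Tag{
                {Key: aws.String("classification"), Value: aws.String("internal")},
            },
        },
    })
    resp, err := req.Send(context.TODO())
    if err != nil {
        log.Fatalf("PutObjectTagging: %v", err)
    }
    if resp.VersionId != nil {
        log.Printf("tagged object version: %s", *resp.VersionId)
    }
}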
For more information, +// see GetObjectTagging. +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging +// // // Example sending a request using PutObjectTaggingRequest. // req := client.PutObjectTaggingRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_PutPublicAccessBlock.go b/service/s3/api_op_PutPublicAccessBlock.go index 30019c60539..e76b3bb0756 100644 --- a/service/s3/api_op_PutPublicAccessBlock.go +++ b/service/s3/api_op_PutPublicAccessBlock.go @@ -98,7 +98,29 @@ const opPutPublicAccessBlock = "PutPublicAccessBlock" // Amazon Simple Storage Service. // // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. +// bucket. In order to use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock +// +// * GetBucketPolicyStatus +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // // // Example sending a request using PutPublicAccessBlockRequest. 
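Filling in the params shown in the example comment, here is a hedged sketch that enables every Block Public Access setting on a bucket. The bucket name is a placeholder, the PublicAccessBlockConfiguration field names are assumed from the generated API shapes, and the configuration helper is assumed from the SDK's standard setup:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)

    // Turn on all four Block Public Access settings for the bucket.
    req := svc.PutPublicAccessBlockRequest(&s3.PutPublicAccessBlockInput{
        Bucket: aws.String("example-bucket"),
        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
            BlockPublicAcls:       aws.Bool(true),
            IgnorePublicAcls:      aws.Bool(true),
            BlockPublicPolicy:     aws.Bool(true),
            RestrictPublicBuckets: aws.Bool(true),
        },
    })
    if _, err := req.Send(context.TODO()); err != nil {
        log.Fatalf("PutPublicAccessBlock: %v", err)
    }
}

Remember that the account-level configuration is evaluated alongside this bucket-level one, as described above.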
// req := client.PutPublicAccessBlockRequest(params) diff --git a/service/s3/api_op_RestoreObject.go b/service/s3/api_op_RestoreObject.go index 346dfb57431..0fd2b62dc40 100644 --- a/service/s3/api_op_RestoreObject.go +++ b/service/s3/api_op_RestoreObject.go @@ -13,9 +13,13 @@ import ( type RestoreObjectInput struct { _ struct{} `type:"structure" payload:"RestoreRequest"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -28,6 +32,7 @@ type RestoreObjectInput struct { // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // VersionId used to reference a specific version of the object. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -146,6 +151,190 @@ const opRestoreObject = "RestoreObject" // // Restores an archived copy of an object back into Amazon S3 // +// This operation performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// and s3:GetObject actions. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. 
For more information +// about the S3 structure in the request body, see the following: PutObject +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with Glacier Select restore, see SQL +// Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring Archives +// +// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To +// access an archived object, you must first initiate a restore request. This +// restores a temporary copy of the archived object. In a restore request, you +// specify the number of days that you want the restored copy to exist. After +// the specified period, Amazon S3 deletes the temporary copy but the object +// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object +// was restored from. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// The time it takes restore jobs to finish depends on which storage class the +// object is being restored from and which data access tier you specify. 
+// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for +// a subset of archives are required. For all but the largest archived objects +// (250 MB+), data accessed using Expedited retrievals are typically made +// available within 1–5 minutes. Provisioned capacity ensures that retrieval +// capacity for Expedited retrievals is available when you need it. Expedited +// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE +// storage class. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for the GLACIER +// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval +// option. Standard retrievals typically complete within 3-5 hours from the +// GLACIER storage class and typically complete within 12 hours from the +// DEEP_ARCHIVE storage class. +// +// * Bulk - Bulk retrievals are Amazon Glacier’s lowest-cost retrieval +// option, enabling you to retrieve large amounts, even petabytes, of data +// inexpensively in a day. Bulk retrievals typically complete within 5-12 +// hours from the GLACIER storage class and typically complete within 48 +// hours from the DEEP_ARCHIVE storage class. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. You upgrade the speed of an in-progress +// restoration by issuing another restore request to the same object, setting +// a new Tier request element. When issuing a request to upgrade the restore +// tier, you must choose a tier that is faster than the tier that the in-progress +// restore is using. You must not change any other parameters, such as the Days +// request element. For more information, see Upgrading the Speed of an In-Progress +// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object. 
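Tying the restore workflow above together, the following hedged sketch requests a 10-day temporary copy of an archived object at the Standard retrieval tier. The bucket and key are placeholders, and the GlacierJobParameters container and TierStandard constant are assumed from the generated request shapes rather than spelled out in this diff:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)

    // Ask for a 10-day temporary copy of an archived object using the
    // Standard retrieval tier; the archived original stays in GLACIER.
    req := svc.RestoreObjectRequest(&s3.RestoreObjectInput{
        Bucket: aws.String("example-archive-bucket"),
        Key:    aws.String("backups/2018-12.tar"),
        RestoreRequest: &s3.RestoreRequest{
            Days:                 aws.Int64(10),
            GlacierJobParameters: &s3.GlacierJobParameters{Tier: s3.TierStandard},
        },
    })
    if _, err := req.Send(context.TODO()); err != nil {
        log.Fatalf("RestoreObject: %v", err)
    }
}

Polling a HEAD request for the x-amz-restore header, as described above, reports when the temporary copy becomes available.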
+// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon Simple Storage Service Developer Guide. +// +// Responses +// +// A successful operation returns either the 200 OK or 202 Accepted status code. +// +// * If the object copy is not previously restored, then Amazon S3 returns +// 202 Accepted in the response. +// +// * If the object copy is previously restored, Amazon S3 returns 200 OK +// in the response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: Glacier expedited +// retrievals are currently not available. Try again later. (Returned if +// there is insufficient capacity to process the Expedited request. This +// error applies only to Expedited retrievals and not to Standard or Bulk +// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketNotificationConfiguration +// +// * SQL Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide +// // // Example sending a request using RestoreObjectRequest. // req := client.RestoreObjectRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 05d48c571c9..58d2a097a26 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -210,8 +210,8 @@ type UploadPartOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 @@ -272,12 +272,87 @@ const opUploadPart = "UploadPart" // // Uploads a part in a multipart upload. // +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload) before you +// can upload any part. In response to your initiate request, Amazon S3 returns +// an upload ID, a unique identifier, that you must include in your upload part +// request. 
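A hedged sketch of that sequence: initiate the multipart upload, then upload one part under the returned upload ID. A real upload would loop over parts and finish with CompleteMultipartUpload (or abort); the bucket, key, and 5 MB zero-filled body are placeholders, and the configuration helper is assumed from the SDK's standard setup:

package main

import (
    "bytes"
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/external"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := external.LoadDefaultAWSConfig()
    if err != nil {
        log.Fatalf("load config: %v", err)
    }
    svc := s3.New(cfg)
    ctx := context.TODO()

    bucket, key := aws.String("example-bucket"), aws.String("videos/demo.mp4")

    // 1) Initiate the multipart upload and capture the upload ID.
    initResp, err := svc.CreateMultipartUploadRequest(&s3.CreateMultipartUploadInput{
        Bucket: bucket, Key: key,
    }).Send(ctx)
    if err != nil {
        log.Fatalf("CreateMultipartUpload: %v", err)
    }

    // 2) Upload part number 1; every part except the last must be at least 5 MB.
    partResp, err := svc.UploadPartRequest(&s3.UploadPartInput{
        Bucket:     bucket,
        Key:        key,
        UploadId:   initResp.UploadId,
        PartNumber: aws.Int64(1),
        Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
    }).Send(ctx)
    if err != nil {
        log.Fatalf("UploadPart: %v", err)
    }
    if partResp.ETag != nil {
        log.Printf("part 1 ETag: %s", *partResp.ETag)
    }

    // 3) Repeat for the remaining parts, then call CompleteMultipartUpload
    //    (or AbortMultipartUpload) so the parts stop accruing storage charges.
}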
+// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. If they do not match, Amazon S3 returns +// an error. +// // Note: After you initiate multipart upload and upload one or more parts, you // must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. // +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide . +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the AWS-managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload. For more information, go to Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload. +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side​-encryption​-customer-algorithm +// +// * x-amz-server-side​-encryption​-customer-key +// +// * x-amz-server-side​-encryption​-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. 
HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // // Example sending a request using UploadPartRequest. // req := client.UploadPartRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index 78eeeb31985..60bd94fee0b 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -14,6 +14,8 @@ import ( type UploadPartCopyInput struct { _ struct{} `type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -56,6 +58,8 @@ type UploadPartCopyInput struct { // key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -265,6 +269,7 @@ func (s UploadPartCopyInput) MarshalFields(e protocol.FieldEncoder) error { type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Container for all response elements. CopyPartResult *CopyPartResult `type:"structure"` // The version of the source object that was copied, if you have enabled versioning @@ -285,8 +290,8 @@ type UploadPartCopyOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (KMS) customer + // master key (CMK) that was used for the object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 @@ -351,7 +356,93 @@ const opUploadPartCopy = "UploadPartCopy" // UploadPartCopyRequest returns a request value for making API operation for // Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. +// Uploads a part by copying data from an existing object as data source. You +// specify the data source by adding the request header x-amz-copy-source in +// your request and a byte range by adding the request header x-amz-copy-source-range +// in your request. +// +// The minimum allowable part size for a multipart upload is 5 MB. For more +// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Instead of using an existing object as part data, you might use the UploadPart +// operation and provide data in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request. Amazon S3 returns a unique identifier, the upload +// ID, that you must include in your upload part request. 
+// +// For more information on using the UploadPartCopy operation, see the following +// topics: +// +// * For conceptual information on multipart uploads, go to Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information on permissions required to use the multipart upload +// API, go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about copying objects using a single atomic operation +// vs. the multipart upload, go to Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject and +// UploadPart. +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match x-amz-copy-source-if-unmodified-since x-amz-copy-source-if-modified-since +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; then, S3 returns 200 OK and copies the data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// then, S3 returns 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // // Example sending a request using UploadPartCopyRequest. 
// req := client.UploadPartCopyRequest(params) diff --git a/service/s3/api_types.go b/service/s3/api_types.go index 98017e7311a..992830c4de4 100644 --- a/service/s3/api_types.go +++ b/service/s3/api_types.go @@ -227,9 +227,6 @@ func (s AnalyticsAndOperator) MarshalFields(e protocol.FieldEncoder) error { // Specifies the configuration and any analyses for the analytics filter of // an Amazon S3 bucket. -// -// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) -// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -351,6 +348,9 @@ func (s AnalyticsExportDestination) MarshalFields(e protocol.FieldEncoder) error return nil } +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -413,6 +413,7 @@ func (s AnalyticsFilter) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Contains information about where to publish the analytics results. type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -492,6 +493,8 @@ func (s AnalyticsS3BucketDestination) MarshalFields(e protocol.FieldEncoder) err return nil } +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. type Bucket struct { _ struct{} `type:"structure"` @@ -580,6 +583,7 @@ func (s BucketLifecycleConfiguration) MarshalFields(e protocol.FieldEncoder) err return nil } +// Container for logging status information. type BucketLoggingStatus struct { _ struct{} `type:"structure"` @@ -628,7 +632,8 @@ func (s BucketLoggingStatus) MarshalFields(e protocol.FieldEncoder) error { type CORSConfiguration struct { _ struct{} `type:"structure"` - // A set of allowed origins and methods. + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. // // CORSRules is a required field CORSRules []CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` @@ -790,7 +795,8 @@ func (s CORSRule) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Describes how a CSV-formatted input object is formatted. +// Describes how a uncompressed comma-separated values (CSV)-formatted input +// object is formatted. type CSVInput struct { _ struct{} `type:"structure"` @@ -799,24 +805,45 @@ type CSVInput struct { // to TRUE may lower performance. AllowQuotedRecordDelimiter *bool `type:"boolean"` - // The single character used to indicate a row should be ignored when present - // at the start of a row. + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. Comments *string `type:"string"` - // The value used to separate individual fields in a record. + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Describes the first line of input. Valid values: None, Ignore, Use. + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. 
+ // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). FileHeaderInfo FileHeaderInfo `type:"string" enum:"true"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string `type:"string"` - // The single character used for escaping the quote character inside an already - // escaped value. + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". QuoteEscapeCharacter *string `type:"string"` - // The value used to separate individual records. + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -872,24 +899,33 @@ func (s CSVInput) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Describes how CSV-formatted results are formatted. +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. type CSVOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual fields in a record. + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // The value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". QuoteCharacter *string `type:"string"` - // Th single character used for escaping the quote character inside an already + // The single character used for escaping the quote character inside an already // escaped value. QuoteEscapeCharacter *string `type:"string"` - // Indicates whether or not all output fields should be quoted. + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. QuoteFields QuoteFields `type:"string" enum:"true"` - // The value used to separate individual records. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -933,20 +969,25 @@ func (s CSVOutput) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for specifying the AWS Lambda notification configuration. type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. CloudFunction *string `type:"string"` // The bucket event for which to send notifications. 
Event Event `deprecated:"true" type:"string" enum:"true"` + // Bucket events for which to send notifications. Events []Event `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The role supporting the invocation of the lambda function InvocationRole *string `type:"string"` } @@ -996,9 +1037,15 @@ func (s CloudFunctionConfiguration) MarshalFields(e protocol.FieldEncoder) error return nil } +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. type CommonPrefix struct { _ struct{} `type:"structure"` + // Container for the specified common prefix. Prefix *string `type:"string"` } @@ -1018,9 +1065,11 @@ func (s CommonPrefix) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The container for the completed multipart upload details. type CompletedMultipartUpload struct { _ struct{} `type:"structure"` + // Array of CompletedPart data types. Parts []CompletedPart `locationName:"Part" type:"list" flattened:"true"` } @@ -1046,6 +1095,7 @@ func (s CompletedMultipartUpload) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Details of the parts that were uploaded. type CompletedPart struct { _ struct{} `type:"structure"` @@ -1079,7 +1129,10 @@ func (s CompletedPart) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies a condition that must be met for a redirect to apply. +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. type Condition struct { _ struct{} `type:"structure"` @@ -1122,11 +1175,16 @@ func (s Condition) MarshalFields(e protocol.FieldEncoder) error { return nil } +// >Container for all response elements. type CopyObjectResult struct { _ struct{} `type:"structure"` + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied object. ETag *string `type:"string"` + // Returns the date that the object was last modified. LastModified *time.Time `type:"timestamp"` } @@ -1153,6 +1211,7 @@ func (s CopyObjectResult) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for all response elements. type CopyPartResult struct { _ struct{} `type:"structure"` @@ -1186,6 +1245,7 @@ func (s CopyPartResult) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The configuration information for the bucket. type CreateBucketConfiguration struct { _ struct{} `type:"structure"` @@ -1210,7 +1270,7 @@ func (s CreateBucketConfiguration) MarshalFields(e protocol.FieldEncoder) error return nil } -// The container element for specifying the default object lock retention settings +// The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. 
type DefaultRetention struct { _ struct{} `type:"structure"` @@ -1218,7 +1278,7 @@ type DefaultRetention struct { // The number of days that you want to specify for the default retention period. Days *int64 `type:"integer"` - // The default object lock retention mode you want to apply to new objects placed + // The default Object Lock retention mode you want to apply to new objects placed // in the specified bucket. Mode ObjectLockRetentionMode `type:"string" enum:"true"` @@ -1254,9 +1314,12 @@ func (s DefaultRetention) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for the objects to delete. type Delete struct { _ struct{} `type:"structure"` + // The objects to delete. + // // Objects is a required field Objects []ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -1314,6 +1377,7 @@ func (s Delete) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Information about the delete marker. type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -1327,6 +1391,7 @@ type DeleteMarkerEntry struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // The account that created the delete marker.> Owner *Owner `type:"structure"` // Version ID of an object. @@ -1374,11 +1439,21 @@ func (s DeleteMarkerEntry) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies whether Amazon S3 should replicate delete makers. +// Specifies whether Amazon S3 replicates the delete markers. If you specify +// a Filter, you must specify this element. However, in the latest version of +// replication configuration (when Filter is specified), Amazon S3 doesn't replicate +// delete markers. Therefore, the DeleteMarkerReplication element can contain +// only Disabled. For an example configuration, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// +// If you don't specify the Filter element, Amazon S3 assumes the replication +// configuration is the earlier version, V1. In the earlier version, Amazon +// S3 handled replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` - // The status of the delete marker replication. + // Indicates whether to replicate delete markers. // // In the current implementation, Amazon S3 doesn't replicate the delete markers. // The status must be Disabled. @@ -1401,15 +1476,24 @@ func (s DeleteMarkerReplication) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Information about the deleted object. type DeletedObject struct { _ struct{} `type:"structure"` + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. DeleteMarker *bool `type:"boolean"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. DeleteMarkerVersionId *string `type:"string"` + // The name of the deleted object. Key *string `min:"1" type:"string"` + // The version ID of the deleted object. 
VersionId *string `type:"string"` } @@ -1463,17 +1547,12 @@ type Destination struct { // direct Amazon S3 to change replica ownership to the AWS account that owns // the destination bucket by specifying the AccessControlTranslation property, // this is the account ID of the destination bucket owner. For more information, - // see Cross-Region Replication Additional Configuration: Change Replica Owner - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in - // the Amazon Simple Storage Service Developer Guide. + // see Replication Additional Configuration: Change Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store replicas of the object identified by the rule. - // - // A replication configuration can replicate objects to only one destination - // bucket. If there are multiple rules in your replication configuration, all - // rules must specify the same destination bucket. + // store the results. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -1482,6 +1561,16 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // A container specifying replication metrics-related information, including + // whether emitting metrics and Amazon S3 events for replication are enabled. + // In addition, contains configurations related to specific metrics or events. + // Must be specified together with a ReplicationTime block. + Metrics *Metrics `type:"structure"` + + // A container specifying the time when all objects and operations on objects + // are replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + // The storage class to use when replicating objects, such as standard or reduced // redundancy. By default, Amazon S3 uses the storage class of the source object // to create the object replica. @@ -1509,6 +1598,16 @@ func (s *Destination) Validate() error { invalidParams.AddNested("AccessControlTranslation", err.(aws.ErrInvalidParams)) } } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(aws.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1549,6 +1648,18 @@ func (s Destination) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "EncryptionConfiguration", v, metadata) } + if s.Metrics != nil { + v := s.Metrics + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Metrics", v, metadata) + } + if s.ReplicationTime != nil { + v := s.ReplicationTime + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ReplicationTime", v, metadata) + } if len(s.StorageClass) > 0 { v := s.StorageClass @@ -1558,8 +1669,7 @@ func (s Destination) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Describes the server-side encryption that will be applied to the restore -// results. +// Contains the type of server-side encryption used. 
type Encryption struct { _ struct{} `type:"structure"` @@ -1645,15 +1755,375 @@ func (s EncryptionConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for all error elements. type Error struct { _ struct{} `type:"structure"` + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all regions except the North + // Virginia region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. 
HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. 
HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. 
HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. 
HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. 
+ // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client Code *string `type:"string"` + // The error key. Key *string `min:"1" type:"string"` + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. Message *string `type:"string"` + // The version ID of the error. VersionId *string `type:"string"` } @@ -1691,6 +2161,7 @@ func (s Error) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The error information. type ErrorDocument struct { _ struct{} `type:"structure"` @@ -1733,6 +2204,46 @@ func (s ErrorDocument) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A container that specifies information about existing object replication. +// You can choose whether to enable or disable the replication of existing objects. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Specifies whether existing object replication is enabled. + // + // Status is a required field + Status ExistingObjectReplicationStatus `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExistingObjectReplication) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ExistingObjectReplication"} + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ExistingObjectReplication) MarshalFields(e protocol.FieldEncoder) error { + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", v, metadata) + } + return nil +} + // Specifies the Amazon S3 object key name to filter on and whether to filter // on the suffix or prefix of the key name. type FilterRule struct { @@ -1771,6 +2282,7 @@ func (s FilterRule) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -1809,9 +2321,11 @@ func (s GlacierJobParameters) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for grant information. 
type Grant struct {
 _ struct{} `type:"structure"`
+ // The person being granted permissions.
 Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
 // Specifies the permission given to the grantee.
@@ -1861,6 +2375,7 @@ func (s Grant) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Container for the person being granted permissions.
type Grantee struct {
 _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
@@ -1930,6 +2445,7 @@ func (s Grantee) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Container for the Suffix element.
type IndexDocument struct {
 _ struct{} `type:"structure"`
@@ -1972,6 +2488,7 @@ func (s IndexDocument) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Container element that identifies who initiated the multipart upload.
type Initiator struct {
 _ struct{} `type:"structure"`
@@ -2202,6 +2719,7 @@ func (s InventoryConfiguration) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Specifies the inventory configuration for an Amazon S3 bucket.
type InventoryDestination struct {
 _ struct{} `type:"structure"`
@@ -2296,6 +2814,8 @@ func (s InventoryEncryption) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Specifies an inventory filter. The inventory only includes objects that meet
+// the filter's criteria.
type InventoryFilter struct {
 _ struct{} `type:"structure"`
@@ -2335,6 +2855,8 @@ func (s InventoryFilter) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Contains the bucket name, file format, bucket owner (optional), and prefix
+// (optional) where inventory results are published.
type InventoryS3BucketDestination struct {
 _ struct{} `type:"structure"`
@@ -2429,6 +2951,7 @@ func (s InventoryS3BucketDestination) MarshalFields(e protocol.FieldEncoder) err
 return nil
}
+// Specifies the schedule for generating inventory results.
type InventorySchedule struct {
 _ struct{} `type:"structure"`
@@ -2467,6 +2990,7 @@ func (s InventorySchedule) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Specifies JSON as object's input serialization format.
type JSONInput struct {
 _ struct{} `type:"structure"`
@@ -2490,6 +3014,7 @@ func (s JSONInput) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Specifies JSON as request's output serialization format.
type JSONOutput struct {
 _ struct{} `type:"structure"`
@@ -2598,9 +3123,12 @@ func (s LambdaFunctionConfiguration) MarshalFields(e protocol.FieldEncoder) erro
 return nil
}
+// Container for lifecycle rules. You can add as many as 1000 rules.
type LifecycleConfiguration struct {
 _ struct{} `type:"structure"`
+ // Specifies lifecycle configuration rules for an Amazon S3 bucket.
+ //
 // Rules is a required field
 Rules []Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
}
@@ -2648,6 +3176,7 @@ func (s LifecycleConfiguration) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// Container for the expiration for the lifecycle of the object.
type LifecycleExpiration struct {
 _ struct{} `type:"structure"`
@@ -2695,6 +3224,7 @@ func (s LifecycleExpiration) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
type LifecycleRule struct {
 _ struct{} `type:"structure"`
@@ -2705,6 +3235,8 @@ type LifecycleRule struct {
 // in the Amazon Simple Storage Service Developer Guide.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+ // Specifies the expiration for the lifecycle of the object in the form of date,
+ // days, and whether the object has a delete marker.
 Expiration *LifecycleExpiration `type:"structure"`
 // The Filter is used to identify objects that a Lifecycle Rule applies to.
@@ -2721,6 +3253,11 @@ type LifecycleRule struct {
 // period in the object's lifetime.
 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+ // Specifies the transition rule for the lifecycle rule that describes when
+ // noncurrent objects transition to a specific storage class. If your bucket
+ // is versioning-enabled (or versioning is suspended), you can set this action
+ // to request that Amazon S3 transition noncurrent object versions to a
+ // specific storage class at a set period in the object's lifetime.
 NoncurrentVersionTransitions []NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
 // Prefix identifying one or more objects to which the rule applies. This is
@@ -2733,6 +3270,7 @@ type LifecycleRule struct {
 // Status is a required field
 Status ExpirationStatus `type:"string" required:"true" enum:"true"`
+ // Specifies when an Amazon S3 object transitions to a specified storage class.
 Transitions []Transition `locationName:"Transition" type:"list" flattened:"true"`
}
@@ -2836,6 +3374,7 @@ func (s LifecycleRule) MarshalFields(e protocol.FieldEncoder) error {
 type LifecycleRuleAndOperator struct {
 _ struct{} `type:"structure"`
+ // Prefix identifying one or more objects to which the rule applies.
 Prefix *string `type:"string"`
 // All of these tags must exist in the object's tag set in order for the rule
@@ -2970,6 +3509,7 @@ type LoggingEnabled struct {
 // TargetBucket is a required field
 TargetBucket *string `type:"string" required:"true"`
+ // Container for granting information.
 TargetGrants []TargetGrant `locationNameList:"Grant" type:"list"`
 // A prefix for all log object keys. If you store log files from multiple Amazon
@@ -3043,8 +3583,10 @@ func (s LoggingEnabled) MarshalFields(e protocol.FieldEncoder) error {
 type MetadataEntry struct {
 _ struct{} `type:"structure"`
+ // Name of the Object.
 Name *string `type:"string"`
+ // Value of the Object.
 Value *string `type:"string"`
}
@@ -3070,6 +3612,67 @@ func (s MetadataEntry) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// A container specifying replication metrics-related information, including
+// whether emitting metrics and Amazon S3 events for replication are enabled.
+// In addition, contains configurations related to specific metrics or events.
+// Must be specified together with a ReplicationTime block.
+type Metrics struct {
+ _ struct{} `type:"structure"`
+
+ // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold
+ // event.
+ //
+ // EventThreshold is a required field
+ EventThreshold *ReplicationTimeValue `type:"structure" required:"true"`
+
+ // Specifies whether the replication metrics are enabled.
+ //
+ // Status is a required field
+ Status MetricsStatus `type:"string" required:"true" enum:"true"`
+}
+
+// String returns the string representation
+func (s Metrics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Metrics) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Metrics"} + + if s.EventThreshold == nil { + invalidParams.Add(aws.NewErrParamRequired("EventThreshold")) + } + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Metrics) MarshalFields(e protocol.FieldEncoder) error { + if s.EventThreshold != nil { + v := s.EventThreshold + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "EventThreshold", v, metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", v, metadata) + } + return nil +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -3187,6 +3790,9 @@ func (s MetricsConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). type MetricsFilter struct { _ struct{} `type:"structure"` @@ -3250,6 +3856,7 @@ func (s MetricsFilter) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for the MultipartUpload for the Amazon S3 object. type MultipartUpload struct { _ struct{} `type:"structure"` @@ -3262,6 +3869,7 @@ type MultipartUpload struct { // Key of the object for which the multipart upload was initiated. Key *string `min:"1" type:"string"` + // Specifies the owner of the object that is part of the multipart upload. Owner *Owner `type:"structure"` // The class of storage used to store the object. @@ -3491,10 +4099,17 @@ func (s NotificationConfiguration) MarshalFields(e protocol.FieldEncoder) error type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deperecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -3552,17 +4167,25 @@ func (s NotificationConfigurationFilter) MarshalFields(e protocol.FieldEncoder) return nil } +// An object consists of data and its descriptive metadata. type Object struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of the object. ETag reflects only changes to + // the contents of an object, not its metadata. ETag *string `type:"string"` + // The name that you assign to an object. You use the object key to retrieve + // the object. 
Key *string `min:"1" type:"string"` + // The date the Object was Last Modified LastModified *time.Time `type:"timestamp"` + // The owner of the object Owner *Owner `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` // The class of storage used to store the object. @@ -3616,6 +4239,7 @@ func (s Object) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Object Identifier is unique value to identify objects. type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -3667,14 +4291,14 @@ func (s ObjectIdentifier) MarshalFields(e protocol.FieldEncoder) error { return nil } -// The container element for object lock configuration parameters. +// The container element for Object Lock configuration parameters. type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an object lock configuration enabled. + // Indicates whether this bucket has an Object Lock configuration enabled. ObjectLockEnabled ObjectLockEnabled `type:"string" enum:"true"` - // The object lock rule in place for the specified object. + // The Object Lock rule in place for the specified object. Rule *ObjectLockRule `type:"structure"` } @@ -3731,7 +4355,7 @@ type ObjectLockRetention struct { // Indicates the Retention mode for the specified object. Mode ObjectLockRetentionMode `type:"string" enum:"true"` - // The date on which this object lock retention expires. + // The date on which this Object Lock Retention will expire. RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -3758,7 +4382,7 @@ func (s ObjectLockRetention) MarshalFields(e protocol.FieldEncoder) error { return nil } -// The container element for an object lock rule. +// The container element for an Object Lock rule. type ObjectLockRule struct { _ struct{} `type:"structure"` @@ -3783,9 +4407,11 @@ func (s ObjectLockRule) MarshalFields(e protocol.FieldEncoder) error { return nil } +// The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of that version of the object ETag *string `type:"string"` // Specifies whether the object is (true) or is not (false) the latest version @@ -3798,6 +4424,7 @@ type ObjectVersion struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // Specifies the Owner of the object. Owner *Owner `type:"structure"` // Size in bytes of the object. @@ -3941,11 +4568,14 @@ func (s OutputSerialization) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for the owner's display name and ID type Owner struct { _ struct{} `type:"structure"` + // Container for the display name of the owner DisplayName *string `type:"string"` + // Container for the ID of the owner ID *string `type:"string"` } @@ -3971,6 +4601,7 @@ func (s Owner) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for Parquet. type ParquetInput struct { _ struct{} `type:"structure"` } @@ -3985,6 +4616,7 @@ func (s ParquetInput) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for elements related to a part. type Part struct { _ struct{} `type:"structure"` @@ -4062,7 +4694,11 @@ func (s PolicyStatus) MarshalFields(e protocol.FieldEncoder) error { return nil } -// Specifies the Block Public Access configuration for an Amazon S3 bucket. +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. 
For
+// more information about when Amazon S3 considers a bucket or object public,
+// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev//access-control-block-public-access.html#access-control-block-public-access-policy-status)
+// in the Amazon Simple Storage Service Developer Guide.
type PublicAccessBlockConfiguration struct {
 _ struct{} `type:"structure"`
@@ -4075,6 +4711,8 @@ type PublicAccessBlockConfiguration struct {
 //
 // * PUT Object calls fail if the request includes a public ACL.
 //
+ // * PUT Bucket calls fail if the request includes a public ACL.
+ //
 // Enabling this setting doesn't affect existing policies or ACLs.
 BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
@@ -4143,6 +4781,8 @@ func (s PublicAccessBlockConfiguration) MarshalFields(e protocol.FieldEncoder) e
 type QueueConfiguration struct {
 _ struct{} `type:"structure"`
+ // A collection of bucket events for which to send notifications.
+ //
 // Events is a required field
 Events []Event `locationName:"Event" type:"list" flattened:"true" required:"true"`
@@ -4220,18 +4860,25 @@ func (s QueueConfiguration) MarshalFields(e protocol.FieldEncoder) error {
 return nil
}
+// This data type is deprecated. Please use QueueConfiguration for the same
+// purposes. This data type specifies the configuration for publishing messages
+// to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects
+// specified events.
 type QueueConfigurationDeprecated struct {
 _ struct{} `type:"structure"`
 // The bucket event for which to send notifications.
 Event Event `deprecated:"true" type:"string" enum:"true"`
+ // A collection of bucket events for which to send notifications.
 Events []Event `locationName:"Event" type:"list" flattened:"true"`
 // An optional unique identifier for configurations in a notification configuration.
 // If you don't provide one, Amazon S3 will assign an ID.
 Id *string `type:"string"`
+ // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+ // publishes a message when it detects events of the specified type.
 Queue *string `type:"string"`
}
@@ -4403,7 +5050,7 @@ type ReplicationConfiguration struct {
 // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
 // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
- // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html)
+ // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
 // in the Amazon Simple Storage Service Developer Guide.
 //
 // Role is a required field
@@ -4473,7 +5120,17 @@ func (s ReplicationConfiguration) MarshalFields(e protocol.FieldEncoder) error {
 type ReplicationRule struct {
 _ struct{} `type:"structure"`
- // Specifies whether Amazon S3 should replicate delete makers.
+ // Specifies whether Amazon S3 replicates the delete markers. If you specify
+ // a Filter, you must specify this element. However, in the latest version of
+ // replication configuration (when Filter is specified), Amazon S3 doesn't replicate
+ // delete markers. Therefore, the DeleteMarkerReplication element can contain
+ // only Disabled. For an example configuration, see Basic Rule
+ // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes the replication
+ // configuration is the earlier version, V1.
In the earlier version, Amazon + // S3 handled replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` // A container for information about the replication destination. @@ -4481,6 +5138,10 @@ type ReplicationRule struct { // Destination is a required field Destination *Destination `type:"structure" required:"true"` + // A container that specifies information about existing object replication. + // You can choose whether to enable or disable the replication of existing objects. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. Filter *ReplicationRuleFilter `type:"structure"` @@ -4504,7 +5165,7 @@ type ReplicationRule struct { // * Same object qualify tag based filter criteria specified in multiple // rules // - // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. Priority *int64 `type:"integer"` @@ -4512,7 +5173,7 @@ type ReplicationRule struct { // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using - // an AWS KMS-Managed Key (SSE-KMS). + // a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` // Specifies whether the rule is enabled. @@ -4541,6 +5202,11 @@ func (s *ReplicationRule) Validate() error { invalidParams.AddNested("Destination", err.(aws.ErrInvalidParams)) } } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(aws.ErrInvalidParams)) + } + } if s.Filter != nil { if err := s.Filter.Validate(); err != nil { invalidParams.AddNested("Filter", err.(aws.ErrInvalidParams)) @@ -4572,6 +5238,12 @@ func (s ReplicationRule) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetFields(protocol.BodyTarget, "Destination", v, metadata) } + if s.ExistingObjectReplication != nil { + v := s.ExistingObjectReplication + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ExistingObjectReplication", v, metadata) + } if s.Filter != nil { v := s.Filter @@ -4611,11 +5283,25 @@ func (s ReplicationRule) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// * If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// * If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag type ReplicationRuleAndOperator struct { _ struct{} `type:"structure"` + // An object keyname prefix that identifies the subset of objects to which the + // rule applies. 
Prefix *string `type:"string"` + // An array of tags containing key and value pairs. Tags []Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } @@ -4738,6 +5424,87 @@ func (s ReplicationRuleFilter) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A container specifying the time when all objects and operations on objects +// are replicated. Must be specified together with a Metrics block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status ReplicationTimeStatus `type:"string" required:"true" enum:"true"` + + // A container specifying the time by which replication should complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ReplicationTime"} + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + + if s.Time == nil { + invalidParams.Add(aws.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ReplicationTime) MarshalFields(e protocol.FieldEncoder) error { + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", v, metadata) + } + if s.Time != nil { + v := s.Time + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Time", v, metadata) + } + return nil +} + +// A container specifying the time value. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + Minutes *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ReplicationTimeValue) MarshalFields(e protocol.FieldEncoder) error { + if s.Minutes != nil { + v := *s.Minutes + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Minutes", protocol.Int64Value(v), metadata) + } + return nil +} + +// Container for Payer. type RequestPaymentConfiguration struct { _ struct{} `type:"structure"` @@ -4948,6 +5715,7 @@ type Rule struct { // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object. Expiration *LifecycleExpiration `type:"structure"` // Unique identifier for the rule. The value can't be longer than 255 characters. @@ -5106,8 +5874,7 @@ type S3Location struct { // The canned ACL to apply to the restore results. CannedACL ObjectCannedACL `type:"string" enum:"true"` - // Describes the server-side encryption that will be applied to the restore - // results. + // Contains the type of server-side encryption used. Encryption *Encryption `type:"structure"` // The prefix that is prepended to the restore results for this request. 
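The ReplicationTime and ReplicationTimeValue shapes introduced in the api_types.go hunk above are ordinary generated types, so they can be built and checked with the generated Validate helper shown there. The sketch below assumes the v0.17.0 import path github.com/aws/aws-sdk-go-v2/service/s3 and an s3.ReplicationTimeStatusEnabled enum constant, which this diff does not show.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Status and Time are both required, as enforced by the generated
	// Validate method in the hunk above.
	rt := s3.ReplicationTime{
		Status: s3.ReplicationTimeStatusEnabled, // assumed enum constant (not shown in this diff)
		Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
	}
	if err := rt.Validate(); err != nil {
		fmt.Println("invalid ReplicationTime:", err)
		return
	}
	fmt.Println(rt.String())
}
```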
@@ -5234,8 +6001,8 @@ func (s S3Location) MarshalFields(e protocol.FieldEncoder) error { type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the AWS Key Management Service (KMS) master encryption - // key to use for encrypting Inventory reports. + // Specifies the ID of the AWS Key Management Service (KMS) customer master + // key (CMK) to use for encrypting Inventory reports. // // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` @@ -5525,7 +6292,7 @@ func (s ServerSideEncryptionRule) MarshalFields(e protocol.FieldEncoder) error { // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using -// an AWS KMS-Managed Key (SSE-KMS). +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -5572,7 +6339,7 @@ type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using an AWS KMS-managed key. + // using a customer master key (CMK) stored in AWS Key Management Service. // // Status is a required field Status SseKmsEncryptedObjectsStatus `type:"string" required:"true" enum:"true"` @@ -5649,6 +6416,8 @@ func (s StorageClassAnalysis) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -5707,6 +6476,7 @@ func (s StorageClassAnalysisDataExport) MarshalFields(e protocol.FieldEncoder) e return nil } +// A container of a key value name pair. type Tag struct { _ struct{} `type:"structure"` @@ -5764,9 +6534,12 @@ func (s Tag) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for TagSet elements. type Tagging struct { _ struct{} `type:"structure"` + // A collection for a a set of tags + // // TagSet is a required field TagSet []Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -5814,9 +6587,11 @@ func (s Tagging) MarshalFields(e protocol.FieldEncoder) error { return nil } +// Container for granting information. type TargetGrant struct { _ struct{} `type:"structure"` + // Container for the person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. @@ -5953,12 +6728,17 @@ func (s TopicConfiguration) MarshalFields(e protocol.FieldEncoder) error { return nil } +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deperecated. Please use TopicConfiguration +// instead. type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. Event Event `deprecated:"true" type:"string" enum:"true"` + // A collection of events related to objects Events []Event `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. 
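To tie the reworded SSE-KMS documentation above to usage: a replication rule that copies only objects encrypted with a customer master key (CMK) stored in AWS KMS carries a SourceSelectionCriteria roughly like the sketch below. The s3.SseKmsEncryptedObjectsStatusEnabled constant and the generated Validate helper on SseKmsEncryptedObjects are assumptions; neither appears in this diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Replicate only objects created with SSE-KMS, per the
	// SseKmsEncryptedObjects documentation above.
	sse := s3.SseKmsEncryptedObjects{
		Status: s3.SseKmsEncryptedObjectsStatusEnabled, // assumed enum constant
	}
	if err := sse.Validate(); err != nil { // assumed generated Validate helper
		fmt.Println("invalid SseKmsEncryptedObjects:", err)
		return
	}

	criteria := s3.SourceSelectionCriteria{SseKmsEncryptedObjects: &sse}
	fmt.Println(criteria.String())
}
```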
diff --git a/service/s3/s3manager/upload_input.go b/service/s3/s3manager/upload_input.go index 632d37e11a5..652ce38ce3a 100644 --- a/service/s3/s3manager/upload_input.go +++ b/service/s3/s3manager/upload_input.go @@ -16,7 +16,8 @@ import ( type UploadInput struct { _ struct{} `type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL s3.ObjectCannedACL `location:"header" locationName:"x-amz-acl" type:"string" enum:"true"` // The readable body payload to send to S3. @@ -27,29 +28,38 @@ type UploadInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. 
For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -72,13 +82,14 @@ type UploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]string `location:"headers" locationName:"x-amz-meta-" type:"map"` - // The Legal Hold status that you want to apply to the specified object. + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus s3.ObjectLockLegalHoldStatus `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"true"` - // The object lock mode that you want to apply to this object. + // The Object Lock mode that you want to apply to this object. ObjectLockMode s3.ObjectLockMode `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"true"` - // The date and time when you want this object's object lock to expire. + // The date and time when you want this object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // Confirms that the requester knows that she or he will be charged for the @@ -107,17 +118,22 @@ type UploadInput struct { // encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // If the x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // customer master key (CMK) that was used for the object. + // + // If the value of x-amz-server-side-encryption is aws:kms, this header specifies + // the ID of the AWS KMS CMK that will be used for the object. If you specify + // x-amz-server-side-encryption:aws:kms, but do not provide x-amz-server-side-encryption-aws-kms-key-id, + // Amazon S3 uses the AWS managed CMK in AWS to protect the data. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). ServerSideEncryption s3.ServerSideEncryption `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"true"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // If you don't specify, Standard is the default storage class. Amazon S3 supports + // other storage classes. StorageClass s3.StorageClass `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"true"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. 
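As a quick illustration of how the UploadInput fields documented in this file are used, here is a minimal upload sketch. The bucket name and key are placeholders, and it assumes the external config loader plus the s3manager NewUploader/Upload helpers and the UploadOutput.Location field from this SDK version.

```go
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatal(err)
	}

	uploader := s3manager.NewUploader(cfg)
	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket:       aws.String("example-bucket"),  // placeholder
		Key:          aws.String("docs/report.txt"), // placeholder
		Body:         strings.NewReader("hello from the SDK"),
		ContentType:  aws.String("text/plain"),
		CacheControl: aws.String("max-age=3600"),
		StorageClass: s3.StorageClassStandardIa, // see the StorageClass note above
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded to", out.Location)
}
```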
@@ -126,6 +142,21 @@ type UploadInput struct { // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. + // the value of this header in the object metadata. For information about object + // metadata, see . + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } diff --git a/service/sagemaker/api_enums.go b/service/sagemaker/api_enums.go index 3c3d3b20037..b4c6dfac234 100644 --- a/service/sagemaker/api_enums.go +++ b/service/sagemaker/api_enums.go @@ -163,6 +163,23 @@ func (enum CompressionType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type ContainerMode string + +// Enum values for ContainerMode +const ( + ContainerModeSingleModel ContainerMode = "SingleModel" + ContainerModeMultiModel ContainerMode = "MultiModel" +) + +func (enum ContainerMode) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ContainerMode) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ContentClassifier string // Enum values for ContentClassifier diff --git a/service/sagemaker/api_op_CreateEndpointConfig.go b/service/sagemaker/api_op_CreateEndpointConfig.go index 3c7bf6759e0..e39081bd433 100644 --- a/service/sagemaker/api_op_CreateEndpointConfig.go +++ b/service/sagemaker/api_op_CreateEndpointConfig.go @@ -24,17 +24,20 @@ type CreateEndpointConfigInput struct { // SageMaker uses to encrypt data on the storage volume attached to the ML compute // instance that hosts the endpoint. // - // Nitro-based instances do not support encryption with AWS KMS. If any of the - // models that you specify in the ProductionVariants parameter use nitro-based - // instances, do not specify a value for the KmsKeyId parameter. If you specify - // a value for KmsKeyId when using any nitro-based instances, the call to CreateEndpointConfig + // Certain Nitro-based instances include local storage, dependent on the instance + // type. Local storage volumes are encrypted using a hardware module on the + // instance. You can't request a KmsKeyId when using an instance type with local + // storage. If any of the models that you specify in the ProductionVariants + // parameter use nitro-based instances with local storage, do not specify a + // value for the KmsKeyId parameter. If you specify a value for KmsKeyId when + // using any nitro-based instances with local storage, the call to CreateEndpointConfig // fails. // - // For a list of nitro-based instances, see Nitro-based Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) - // in the Amazon Elastic Compute Cloud User Guide for Linux Instances. 
+ // For a list of instance types that support local instance storage, see Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). // - // For more information about storage volumes on nitro-based instances, see - // Amazon EBS and NVMe on Linux Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html). + // For more information about local instance storage encryption, see SSD Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). KmsKeyId *string `type:"string"` // An list of ProductionVariant objects, one for each model that you want to diff --git a/service/sagemaker/api_op_CreateTransformJob.go b/service/sagemaker/api_op_CreateTransformJob.go index b04521017eb..30f39d5b2ac 100644 --- a/service/sagemaker/api_op_CreateTransformJob.go +++ b/service/sagemaker/api_op_CreateTransformJob.go @@ -17,8 +17,8 @@ type CreateTransformJobInput struct { // request. A record is a single unit of input data that inference can be made // on. For example, a single line in a CSV file is a record. // - // To enable the batch strategy, you must set SplitType to Line, RecordIO, or - // TFRecord. + // To enable the batch strategy, you must set the SplitType property of the + // DataProcessing object to Line, RecordIO, or TFRecord. // // To use only one record when making an HTTP invocation request to a container, // set BatchStrategy to SingleRecord and SplitType to Line. @@ -200,8 +200,8 @@ const opCreateTransformJob = "CreateTransformJob" // * TransformResources - Identifies the ML compute instances for the transform // job. // -// For more information about how batch transformation works Amazon SageMaker, -// see How It Works (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). +// For more information about how batch transformation works, see Batch Transform +// (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). // // // Example sending a request using CreateTransformJobRequest. // req := client.CreateTransformJobRequest(params) diff --git a/service/sagemaker/api_types.go b/service/sagemaker/api_types.go index c3f8d095d26..30e187a638d 100644 --- a/service/sagemaker/api_types.go +++ b/service/sagemaker/api_types.go @@ -820,6 +820,9 @@ type ContainerDefinition struct { // Using Your Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) Image *string `type:"string"` + // Specifies whether the container hosts a single model or multiple models. + Mode ContainerMode `type:"string" enum:"true"` + // The S3 path where the model artifacts, which result from model training, // are stored. This path must point to a single gzip compressed tar archive // (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, @@ -906,7 +909,7 @@ type ContinuousParameterRange struct { // // ReverseLogarithmic // - // Hyperparemeter tuning searches the values in the hyperparameter range by + // Hyperparameter tuning searches the values in the hyperparameter range by // using a reverse logarithmic scale. // // Reverse logarithmic scaling works only for ranges that are entirely within @@ -999,8 +1002,8 @@ type DataProcessing struct { InputFilter *string `type:"string"` // Specifies the source of the data to join with the transformed data. The valid - // values are None and Input The default value is None which specifies not to - // join the input with the transformed data. 
If you want the batch transform + // values are None and Input. The default value is None, which specifies not + // to join the input with the transformed data. If you want the batch transform // job to join the original input data with the transformed data, set JoinSource // to Input. // @@ -1282,8 +1285,8 @@ func (s *FileSystemDataSource) Validate() error { return nil } -// A conditional statement for a search expression that includes a Boolean operator, -// a resource property, and a value. +// A conditional statement for a search expression that includes a resource +// property, a Boolean operator, and a value. // // If you don't specify an Operator and a Value, the filter searches for only // the specified property. For example, defining a Filter for the FailureReason @@ -1377,7 +1380,7 @@ type Filter struct { // Contains // // Only supported for text-based properties. The word-list of the property contains - // the specified Value. + // the specified Value. A SearchExpression can include only one Contains operator. // // If you have specified a filter Value, the default is Equals. Operator Operator `type:"string" enum:"true"` @@ -2638,7 +2641,7 @@ type IntegerParameterRange struct { // // Logarithmic // - // Hyperparemeter tuning searches the values in the hyperparameter range by + // Hyperparameter tuning searches the values in the hyperparameter range by // using a logarithmic scale. // // Logarithmic scaling works only for ranges that have only values greater than @@ -3046,6 +3049,8 @@ func (s *LabelingJobS3DataSource) Validate() error { // A set of conditions for stopping a labeling job. If any of the conditions // are met, the job is automatically stopped. You can use these conditions to // control the cost of data labeling. +// +// Labeling jobs fail after 30 days with an appropriate client error message. type LabelingJobStoppingConditions struct { _ struct{} `type:"structure"` @@ -4303,10 +4308,21 @@ type ResourceConfig struct { // InstanceType is a required field InstanceType TrainingInstanceType `type:"string" required:"true" enum:"true"` - // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to - // encrypt data on the storage volume attached to the ML compute instance(s) - // that run the training job. The VolumeKmsKeyId can be any of the following - // formats: + // The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage + // volume attached to the ML compute instance(s) that run the training job. + // + // Certain Nitro-based instances include local storage, dependent on the instance + // type. Local storage volumes are encrypted using a hardware module on the + // instance. You can't request a VolumeKmsKeyId when using an instance type + // with local storage. + // + // For a list of instance types that support local instance storage, see Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). + // + // For more information about local instance storage encryption, see SSD Instance + // Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). + // + // The VolumeKmsKeyId can be in any of the following formats: // // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // @@ -4325,6 +4341,15 @@ type ResourceConfig struct { // Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume // type. 
// + // Certain Nitro-based instances include local storage with a fixed total size, + // dependent on the instance type. When using these instances for training, + // Amazon SageMaker mounts the local instance storage instead of Amazon EBS + // gp2 storage. You can't request a VolumeSizeInGB greater than the total size + // of the local instance storage. + // + // For a list of instance types that support local instance storage, including + // the total size per instance type, see Instance Store Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#instance-store-volumes). + // // VolumeSizeInGB is a required field VolumeSizeInGB *int64 `min:"1" type:"integer" required:"true"` } @@ -4458,13 +4483,14 @@ type S3DataSource struct { // // * A manifest might look like this: s3://bucketname/example.manifest The // manifest is an S3 object which is a JSON file with the following format: - // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", - // "relative/path/custdata-2", ... ] The preceding JSON matches the following - // s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 - // ... The complete set of s3uris in this manifest is the input data for - // the channel for this datasource. The object that each s3uris points to - // must be readable by the IAM role that Amazon SageMaker uses to perform - // tasks on your behalf. + // The preceding JSON matches the following s3Uris: [ {"prefix": "s3://customer_bucket/some/prefix/"}, + // "relative/path/to/custdata-1", "relative/path/custdata-2", ... "relative/path/custdata-N" + // ] The preceding JSON matches the following s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 + // s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N + // The complete set of s3uris in this manifest is the input data for the + // channel for this datasource. The object that each s3uris points to must + // be readable by the IAM role that Amazon SageMaker uses to perform tasks + // on your behalf. // // S3Uri is a required field S3Uri *string `type:"string" required:"true"` @@ -4500,7 +4526,8 @@ func (s *S3DataSource) Validate() error { // A SearchExpression contains the following components: // // * A list of Filter objects. Each filter defines a simple Boolean expression -// comprised of a resource property name, Boolean operator, and value. +// comprised of a resource property name, Boolean operator, and value. A +// SearchExpression can include only one Contains operator. // // * A list of NestedFilter objects. Each nested filter defines a list of // Boolean expressions using a list of resource properties. A nested filter @@ -5507,9 +5534,9 @@ type TransformInput struct { // is removed if the value of BatchStrategy is set to SingleRecord. Padding // is not removed if the value of BatchStrategy is set to MultiRecord. // - // For more information about the RecordIO, see Data Format (http://mxnet.io/architecture/note_data_loading.html#data-format) - // in the MXNet documentation. For more information about the TFRecord, see - // Consuming TFRecord data (https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data) + // For more information about RecordIO, see Create a Dataset Using RecordIO + // (https://mxnet.apache.org/api/faq/recordio) in the MXNet documentation. 
For + // more information about TFRecord, see Consuming TFRecord data (https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data) // in the TensorFlow documentation. SplitType SplitType `type:"string" enum:"true"` } @@ -5697,7 +5724,7 @@ type TransformOutput struct { // in the Amazon Simple Storage Service Developer Guide. // // The KMS key policy must grant permission to the IAM role that you specify - // in your CreateTramsformJob request. For more information, see Using Key Policies + // in your CreateModel request. For more information, see Using Key Policies // in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` @@ -5820,12 +5847,13 @@ type TransformS3DataSource struct { // * A manifest might look like this: s3://bucketname/example.manifest The // manifest is an S3 object which is a JSON file with the following format: // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", - // "relative/path/custdata-2", ... ] The preceding JSON matches the following - // S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-1 - // ... The complete set of S3Uris in this manifest constitutes the input - // data for the channel for this datasource. The object that each S3Uris - // points to must be readable by the IAM role that Amazon SageMaker uses - // to perform tasks on your behalf. + // "relative/path/custdata-2", ... "relative/path/custdata-N" ] The preceding + // JSON matches the following s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 + // s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N + // The complete set of S3Uris in this manifest constitutes the input data + // for the channel for this datasource. The object that each S3Uris points + // to must be readable by the IAM role that Amazon SageMaker uses to perform + // tasks on your behalf. // // S3Uri is a required field S3Uri *string `type:"string" required:"true"` diff --git a/service/sagemakerruntime/api_errors.go b/service/sagemakerruntime/api_errors.go index d00f171a501..9df07b4695d 100644 --- a/service/sagemakerruntime/api_errors.go +++ b/service/sagemakerruntime/api_errors.go @@ -13,7 +13,8 @@ const ( // ErrCodeModelError for service response error code // "ModelError". // - // Model (owned by the customer in the container) returned an error 500. + // Model (owned by the customer in the container) returned 4xx or 5xx error + // code. ErrCodeModelError = "ModelError" // ErrCodeServiceUnavailable for service response error code diff --git a/service/sagemakerruntime/api_op_InvokeEndpoint.go b/service/sagemakerruntime/api_op_InvokeEndpoint.go index 3c7a3ef000d..3d834d6ad17 100644 --- a/service/sagemakerruntime/api_op_InvokeEndpoint.go +++ b/service/sagemakerruntime/api_op_InvokeEndpoint.go @@ -20,7 +20,7 @@ type InvokeEndpointInput struct { // Amazon SageMaker passes all of the data in the body to the model. // // For information about the format of the request body, see Common Data Formats—Inference - // (http://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). 
// // Body is a required field Body []byte `type:"blob" required:"true" sensitive:"true"` @@ -28,14 +28,27 @@ type InvokeEndpointInput struct { // The MIME type of the input data in the request body. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + // Provides additional information about a request for an inference submitted + // to a model hosted at an Amazon SageMaker endpoint. The information is an + // opaque value that is forwarded verbatim. You could use this value, for example, + // to provide an ID that you can use to track a request or to provide other + // metadata that a service endpoint was programmed to process. The value must + // consist of no more than 1024 visible US-ASCII characters as specified in + // Section 3.3.6. Field Value Components (https://tools.ietf.org/html/rfc7230#section-3.2.6) + // of the Hypertext Transfer Protocol (HTTP/1.1). This feature is currently + // supported in the AWS SDKs but not in the Amazon SageMaker Python SDK. CustomAttributes *string `location:"header" locationName:"X-Amzn-SageMaker-Custom-Attributes" type:"string" sensitive:"true"` // The name of the endpoint that you specified when you created the endpoint - // using the CreateEndpoint (http://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) + // using the CreateEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) // API. // // EndpointName is a required field EndpointName *string `location:"uri" locationName:"EndpointName" type:"string" required:"true"` + + // Specifies the model to be requested for an inference when invoking a multi-model + // endpoint. + TargetModel *string `location:"header" locationName:"X-Amzn-SageMaker-Target-Model" min:"1" type:"string"` } // String returns the string representation @@ -54,6 +67,9 @@ func (s *InvokeEndpointInput) Validate() error { if s.EndpointName == nil { invalidParams.Add(aws.NewErrParamRequired("EndpointName")) } + if s.TargetModel != nil && len(*s.TargetModel) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TargetModel", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -82,6 +98,12 @@ func (s InvokeEndpointInput) MarshalFields(e protocol.FieldEncoder) error { metadata := protocol.Metadata{} e.SetValue(protocol.HeaderTarget, "X-Amzn-SageMaker-Custom-Attributes", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) } + if s.TargetModel != nil { + v := *s.TargetModel + + metadata := protocol.Metadata{} + e.SetValue(protocol.HeaderTarget, "X-Amzn-SageMaker-Target-Model", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } if s.EndpointName != nil { v := *s.EndpointName @@ -103,7 +125,7 @@ type InvokeEndpointOutput struct { // Includes the inference provided by the model. // // For information about the format of the response body, see Common Data Formats—Inference - // (http://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-inference.html). // // Body is a required field Body []byte `type:"blob" required:"true" sensitive:"true"` @@ -111,6 +133,19 @@ type InvokeEndpointOutput struct { // The MIME type of the inference returned in the response body. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + // Provides additional information in the response about the inference returned + // by a model hosted at an Amazon SageMaker endpoint. The information is an + // opaque value that is forwarded verbatim. 
You could use this value, for example, + // to return an ID received in the CustomAttributes header of a request or other + // metadata that a service endpoint was programmed to produce. The value must + // consist of no more than 1024 visible US-ASCII characters as specified in + // Section 3.3.6. Field Value Components (https://tools.ietf.org/html/rfc7230#section-3.2.6) + // of the Hypertext Transfer Protocol (HTTP/1.1). If the customer wants the + // custom attribute returned, the model must set the custom attribute to be + // included on the way back. + // + // This feature is currently supported in the AWS SDKs but not in the Amazon + // SageMaker Python SDK. CustomAttributes *string `location:"header" locationName:"X-Amzn-SageMaker-Custom-Attributes" type:"string" sensitive:"true"` // Identifies the production variant that was invoked. @@ -160,16 +195,22 @@ const opInvokeEndpoint = "InvokeEndpoint" // your client applications use this API to get inferences from the model hosted // at the specified endpoint. // -// For an overview of Amazon SageMaker, see How It Works (http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). +// For an overview of Amazon SageMaker, see How It Works (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). // // Amazon SageMaker strips all POST headers except those supported by the API. // Amazon SageMaker might add additional headers. You should not rely on the // behavior of headers outside those enumerated in the request syntax. // -// Cals to InvokeEndpoint are authenticated by using AWS Signature Version 4. -// For information, see Authenticating Requests (AWS Signature Version 4) (http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// Calls to InvokeEndpoint are authenticated by using AWS Signature Version +// 4. For information, see Authenticating Requests (AWS Signature Version 4) +// (http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) // in the Amazon S3 API Reference. // +// A customer's model containers must respond to requests within 60 seconds. +// The model itself can have a maximum processing time of 60 seconds before +// responding to the /invocations. If your model is going to take 50-60 seconds +// of processing time, the SDK socket timeout should be set to be 70 seconds. +// // Endpoints are scoped to an individual account, and are not public. The URL // does not contain the account ID, but Amazon SageMaker determines the account // ID from the authentication token that is supplied by the caller. diff --git a/service/sesv2/api_client.go b/service/sesv2/api_client.go new file mode 100644 index 00000000000..172a92fa05a --- /dev/null +++ b/service/sesv2/api_client.go @@ -0,0 +1,79 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/private/protocol/restjson" +) + +// Client provides the API operation methods for making requests to +// Amazon SES V2. See this package's package overview docs +// for details on the service. +// +// The client's methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. 
+type Client struct { + *aws.Client +} + +// Used for custom client initialization logic +var initClient func(*Client) + +// Used for custom request initialization logic +var initRequest func(*Client, *aws.Request) + +const ( + ServiceName = "Amazon SES V2" // Service's name + ServiceID = "SESv2" // Service's identifier + EndpointsID = "email" // Service's Endpoint identifier +) + +// New creates a new instance of the client from the provided Config. +// +// Example: +// // Create a client from just a config. +// svc := sesv2.New(myConfig) +func New(config aws.Config) *Client { + svc := &Client{ + Client: aws.NewClient( + config, + aws.Metadata{ + ServiceName: ServiceName, + ServiceID: ServiceID, + EndpointsID: EndpointsID, + SigningName: "ses", + SigningRegion: config.Region, + APIVersion: "2019-09-27", + }, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc) + } + + return svc +} + +// newRequest creates a new request for a client operation and runs any +// custom request initialization. +func (c *Client) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(c, req) + } + + return req +} diff --git a/service/sesv2/api_doc.go b/service/sesv2/api_doc.go new file mode 100644 index 00000000000..ede3399bacd --- /dev/null +++ b/service/sesv2/api_doc.go @@ -0,0 +1,54 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sesv2 provides the client and types for making API +// requests to Amazon SES V2. +// +// Welcome to the Amazon SES API v2 Reference. This guide provides information +// about the Amazon SES API v2, including supported operations, data types, +// parameters, and schemas. +// +// Amazon SES (https://aws.amazon.com/pinpoint) is an AWS service that you can +// use to send email messages to your customers. +// +// If you're new to Amazon SES API v2, you might find it helpful to also review +// the Amazon Simple Email Service Developer Guide (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/). +// The Amazon SES Developer Guide provides information and code samples that +// demonstrate how to use Amazon SES API v2 features programmatically. +// +// The Amazon SES API v2 is available in several AWS Regions and it provides +// an endpoint for each of these Regions. For a list of all the Regions and +// endpoints where the API is currently available, see AWS Service Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region) in +// the Amazon Web Services General Reference. To learn more about AWS Regions, +// see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the Amazon Web Services General Reference. +// +// In each Region, AWS maintains multiple Availability Zones. These Availability +// Zones are physically isolated from each other, but are united by private, +// low-latency, high-throughput, and highly redundant network connections. 
These +// Availability Zones enable us to provide very high levels of availability +// and redundancy, while also minimizing latency. To learn more about the number +// of Availability Zones that are available in each Region, see AWS Global Infrastructure +// (http://aws.amazon.com/about-aws/global-infrastructure/). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27 for more information on this service. +// +// See sesv2 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sesv2/ +// +// Using the Client +// +// To use Amazon SES V2 with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon SES V2 client for more information on +// creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sesv2/#New +package sesv2 diff --git a/service/sesv2/api_enums.go b/service/sesv2/api_enums.go new file mode 100644 index 00000000000..e3faa78d76a --- /dev/null +++ b/service/sesv2/api_enums.go @@ -0,0 +1,250 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +// The action that you want to take if the required MX record can't be found +// when you send an email. When you set this value to UseDefaultValue, the mail +// is sent using amazonses.com as the MAIL FROM domain. When you set this value +// to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified +// error, and doesn't attempt to deliver the email. +// +// These behaviors are taken when the custom MAIL FROM domain configuration +// is in the Pending, Failed, and TemporaryFailure states. +type BehaviorOnMxFailure string + +// Enum values for BehaviorOnMxFailure +const ( + BehaviorOnMxFailureUseDefaultValue BehaviorOnMxFailure = "USE_DEFAULT_VALUE" + BehaviorOnMxFailureRejectMessage BehaviorOnMxFailure = "REJECT_MESSAGE" +) + +func (enum BehaviorOnMxFailure) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum BehaviorOnMxFailure) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The current status of your Deliverability dashboard subscription. If this +// value is PENDING_EXPIRATION, your subscription is scheduled to expire at +// the end of the current calendar month. +type DeliverabilityDashboardAccountStatus string + +// Enum values for DeliverabilityDashboardAccountStatus +const ( + DeliverabilityDashboardAccountStatusActive DeliverabilityDashboardAccountStatus = "ACTIVE" + DeliverabilityDashboardAccountStatusPendingExpiration DeliverabilityDashboardAccountStatus = "PENDING_EXPIRATION" + DeliverabilityDashboardAccountStatusDisabled DeliverabilityDashboardAccountStatus = "DISABLED" +) + +func (enum DeliverabilityDashboardAccountStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeliverabilityDashboardAccountStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The status of a predictive inbox placement test. If the status is IN_PROGRESS, +// then the predictive inbox placement test is currently running. 
Predictive +// inbox placement tests are usually complete within 24 hours of creating the +// test. If the status is COMPLETE, then the test is finished, and you can use +// the GetDeliverabilityTestReport operation to view the results of the test. +type DeliverabilityTestStatus string + +// Enum values for DeliverabilityTestStatus +const ( + DeliverabilityTestStatusInProgress DeliverabilityTestStatus = "IN_PROGRESS" + DeliverabilityTestStatusCompleted DeliverabilityTestStatus = "COMPLETED" +) + +func (enum DeliverabilityTestStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeliverabilityTestStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The location where the Amazon SES API v2 finds the value of a dimension to +// publish to Amazon CloudWatch. If you want to use the message tags that you +// specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail +// or SendRawEmail API, choose messageTag. If you want to use your own email +// headers, choose emailHeader. If you want to use link tags, choose linkTags. +type DimensionValueSource string + +// Enum values for DimensionValueSource +const ( + DimensionValueSourceMessageTag DimensionValueSource = "MESSAGE_TAG" + DimensionValueSourceEmailHeader DimensionValueSource = "EMAIL_HEADER" + DimensionValueSourceLinkTag DimensionValueSource = "LINK_TAG" +) + +func (enum DimensionValueSource) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DimensionValueSource) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The DKIM authentication status of the identity. The status can be one of +// the following: +// +// * PENDING – The DKIM verification process was initiated, and Amazon +// SES hasn't yet detected the CNAME records in the DNS configuration for +// the domain. +// +// * SUCCESS – The DKIM authentication process completed successfully. +// +// * FAILED – The DKIM authentication process failed. This can happen when +// Amazon SES fails to find the required CNAME records in the DNS configuration +// of the domain. +// +// * TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from +// determining the DKIM authentication status of the domain. +// +// * NOT_STARTED – The DKIM verification process hasn't been initiated +// for the domain. +type DkimStatus string + +// Enum values for DkimStatus +const ( + DkimStatusPending DkimStatus = "PENDING" + DkimStatusSuccess DkimStatus = "SUCCESS" + DkimStatusFailed DkimStatus = "FAILED" + DkimStatusTemporaryFailure DkimStatus = "TEMPORARY_FAILURE" + DkimStatusNotStarted DkimStatus = "NOT_STARTED" +) + +func (enum DkimStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DkimStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// An email sending event type. For example, email sends, opens, and bounces +// are all email events. 
+type EventType string + +// Enum values for EventType +const ( + EventTypeSend EventType = "SEND" + EventTypeReject EventType = "REJECT" + EventTypeBounce EventType = "BOUNCE" + EventTypeComplaint EventType = "COMPLAINT" + EventTypeDelivery EventType = "DELIVERY" + EventTypeOpen EventType = "OPEN" + EventTypeClick EventType = "CLICK" + EventTypeRenderingFailure EventType = "RENDERING_FAILURE" +) + +func (enum EventType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EventType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The email identity type. The identity type can be one of the following: +// +// * EMAIL_ADDRESS – The identity is an email address. +// +// * DOMAIN – The identity is a domain. +type IdentityType string + +// Enum values for IdentityType +const ( + IdentityTypeEmailAddress IdentityType = "EMAIL_ADDRESS" + IdentityTypeDomain IdentityType = "DOMAIN" + IdentityTypeManagedDomain IdentityType = "MANAGED_DOMAIN" +) + +func (enum IdentityType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum IdentityType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The status of the MAIL FROM domain. This status can have the following values: +// +// * PENDING – Amazon SES hasn't started searching for the MX record yet. +// +// * SUCCESS – Amazon SES detected the required MX record for the MAIL +// FROM domain. +// +// * FAILED – Amazon SES can't find the required MX record, or the record +// no longer exists. +// +// * TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon +// SES from determining the status of the MAIL FROM domain. +type MailFromDomainStatus string + +// Enum values for MailFromDomainStatus +const ( + MailFromDomainStatusPending MailFromDomainStatus = "PENDING" + MailFromDomainStatusSuccess MailFromDomainStatus = "SUCCESS" + MailFromDomainStatusFailed MailFromDomainStatus = "FAILED" + MailFromDomainStatusTemporaryFailure MailFromDomainStatus = "TEMPORARY_FAILURE" +) + +func (enum MailFromDomainStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum MailFromDomainStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// Specifies whether messages that use the configuration set are required to +// use Transport Layer Security (TLS). If the value is Require, messages are +// only delivered if a TLS connection can be established. If the value is Optional, +// messages can be delivered in plain text if a TLS connection can't be established. +type TlsPolicy string + +// Enum values for TlsPolicy +const ( + TlsPolicyRequire TlsPolicy = "REQUIRE" + TlsPolicyOptional TlsPolicy = "OPTIONAL" +) + +func (enum TlsPolicy) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum TlsPolicy) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The warmup status of a dedicated IP. 
+type WarmupStatus string + +// Enum values for WarmupStatus +const ( + WarmupStatusInProgress WarmupStatus = "IN_PROGRESS" + WarmupStatusDone WarmupStatus = "DONE" +) + +func (enum WarmupStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum WarmupStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/service/sesv2/api_errors.go b/service/sesv2/api_errors.go new file mode 100644 index 00000000000..e66677410aa --- /dev/null +++ b/service/sesv2/api_errors.go @@ -0,0 +1,68 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +const ( + + // ErrCodeAccountSuspendedException for service response error code + // "AccountSuspendedException". + // + // The message can't be sent because the account's ability to send email has + // been permanently restricted. + ErrCodeAccountSuspendedException = "AccountSuspendedException" + + // ErrCodeAlreadyExistsException for service response error code + // "AlreadyExistsException". + // + // The resource specified in your request already exists. + ErrCodeAlreadyExistsException = "AlreadyExistsException" + + // ErrCodeBadRequestException for service response error code + // "BadRequestException". + // + // The input you provided is invalid. + ErrCodeBadRequestException = "BadRequestException" + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModificationException". + // + // The resource is being modified by another operation or thread. + ErrCodeConcurrentModificationException = "ConcurrentModificationException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // There are too many instances of the specified resource type. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodeMailFromDomainNotVerifiedException for service response error code + // "MailFromDomainNotVerifiedException". + // + // The message can't be sent because the sending domain isn't verified. + ErrCodeMailFromDomainNotVerifiedException = "MailFromDomainNotVerifiedException" + + // ErrCodeMessageRejected for service response error code + // "MessageRejected". + // + // The message can't be sent because it contains invalid content. + ErrCodeMessageRejected = "MessageRejected" + + // ErrCodeNotFoundException for service response error code + // "NotFoundException". + // + // The resource you attempted to access doesn't exist. + ErrCodeNotFoundException = "NotFoundException" + + // ErrCodeSendingPausedException for service response error code + // "SendingPausedException". + // + // The message can't be sent because the account's ability to send email is + // currently paused. + ErrCodeSendingPausedException = "SendingPausedException" + + // ErrCodeTooManyRequestsException for service response error code + // "TooManyRequestsException". + // + // Too many requests have been made to the operation. + ErrCodeTooManyRequestsException = "TooManyRequestsException" +) diff --git a/service/sesv2/api_op_CreateConfigurationSet.go b/service/sesv2/api_op_CreateConfigurationSet.go new file mode 100644 index 00000000000..6fdc7aa53ac --- /dev/null +++ b/service/sesv2/api_op_CreateConfigurationSet.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to create a configuration set. +type CreateConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `type:"string" required:"true"` + + // An object that defines the dedicated IP pool that is used to send emails + // that you send using the configuration set. + DeliveryOptions *DeliveryOptions `type:"structure"` + + // An object that defines whether or not Amazon SES collects reputation metrics + // for the emails that you send that use the configuration set. + ReputationOptions *ReputationOptions `type:"structure"` + + // An object that defines whether or not Amazon SES can send email that you + // send using the configuration set. + SendingOptions *SendingOptions `type:"structure"` + + // An array of objects that define the tags (keys and values) that you want + // to associate with the configuration set. + Tags []Tag `type:"list"` + + // An object that defines the open and click tracking options for emails that + // you send using the configuration set. + TrackingOptions *TrackingOptions `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConfigurationSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateConfigurationSetInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + if s.TrackingOptions != nil { + if err := s.TrackingOptions.Validate(); err != nil { + invalidParams.AddNested("TrackingOptions", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateConfigurationSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DeliveryOptions != nil { + v := s.DeliveryOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DeliveryOptions", v, metadata) + } + if s.ReputationOptions != nil { + v := s.ReputationOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ReputationOptions", v, metadata) + } + if s.SendingOptions != nil { + v := s.SendingOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SendingOptions", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.TrackingOptions != nil { + v := s.TrackingOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TrackingOptions", v, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type CreateConfigurationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateConfigurationSetOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opCreateConfigurationSet = "CreateConfigurationSet" + +// CreateConfigurationSetRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Create a configuration set. Configuration sets are groups of rules that you +// can apply to the emails that you send. You apply a configuration set to an +// email by specifying the name of the configuration set when you call the Amazon +// SES API v2. When you apply a configuration set to an email, all of the rules +// in that configuration set are applied to the email. +// +// // Example sending a request using CreateConfigurationSetRequest. +// req := client.CreateConfigurationSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/CreateConfigurationSet +func (c *Client) CreateConfigurationSetRequest(input *CreateConfigurationSetInput) CreateConfigurationSetRequest { + op := &aws.Operation{ + Name: opCreateConfigurationSet, + HTTPMethod: "POST", + HTTPPath: "/v2/email/configuration-sets", + } + + if input == nil { + input = &CreateConfigurationSetInput{} + } + + req := c.newRequest(op, input, &CreateConfigurationSetOutput{}) + return CreateConfigurationSetRequest{Request: req, Input: input, Copy: c.CreateConfigurationSetRequest} +} + +// CreateConfigurationSetRequest is the request type for the +// CreateConfigurationSet API operation. +type CreateConfigurationSetRequest struct { + *aws.Request + Input *CreateConfigurationSetInput + Copy func(*CreateConfigurationSetInput) CreateConfigurationSetRequest +} + +// Send marshals and sends the CreateConfigurationSet API request. 
+func (r CreateConfigurationSetRequest) Send(ctx context.Context) (*CreateConfigurationSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateConfigurationSetResponse{ + CreateConfigurationSetOutput: r.Request.Data.(*CreateConfigurationSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateConfigurationSetResponse is the response type for the +// CreateConfigurationSet API operation. +type CreateConfigurationSetResponse struct { + *CreateConfigurationSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateConfigurationSet request. +func (r *CreateConfigurationSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_CreateConfigurationSetEventDestination.go b/service/sesv2/api_op_CreateConfigurationSetEventDestination.go new file mode 100644 index 00000000000..c6ad1a701ef --- /dev/null +++ b/service/sesv2/api_op_CreateConfigurationSetEventDestination.go @@ -0,0 +1,180 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to add an event destination to a configuration set. +type CreateConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to add an event destination + // to. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // An object that defines the event destination. + // + // EventDestination is a required field + EventDestination *EventDestinationDefinition `type:"structure" required:"true"` + + // A name that identifies the event destination within the configuration set. + // + // EventDestinationName is a required field + EventDestinationName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConfigurationSetEventDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateConfigurationSetEventDestinationInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if s.EventDestination == nil { + invalidParams.Add(aws.NewErrParamRequired("EventDestination")) + } + + if s.EventDestinationName == nil { + invalidParams.Add(aws.NewErrParamRequired("EventDestinationName")) + } + if s.EventDestination != nil { + if err := s.EventDestination.Validate(); err != nil { + invalidParams.AddNested("EventDestination", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateConfigurationSetEventDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EventDestination != nil { + v := s.EventDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "EventDestination", v, metadata) + } + if s.EventDestinationName != nil { + v := *s.EventDestinationName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "EventDestinationName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type CreateConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateConfigurationSetEventDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opCreateConfigurationSetEventDestination = "CreateConfigurationSetEventDestination" + +// CreateConfigurationSetEventDestinationRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Create an event destination. Events include message sends, deliveries, opens, +// clicks, bounces, and complaints. Event destinations are places that you can +// send information about these events to. For example, you can send event data +// to Amazon SNS to receive notifications when you receive bounces or complaints, +// or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for +// long-term storage. +// +// A single configuration set can include more than one event destination. +// +// // Example sending a request using CreateConfigurationSetEventDestinationRequest. +// req := client.CreateConfigurationSetEventDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/CreateConfigurationSetEventDestination +func (c *Client) CreateConfigurationSetEventDestinationRequest(input *CreateConfigurationSetEventDestinationInput) CreateConfigurationSetEventDestinationRequest { + op := &aws.Operation{ + Name: opCreateConfigurationSetEventDestination, + HTTPMethod: "POST", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations", + } + + if input == nil { + input = &CreateConfigurationSetEventDestinationInput{} + } + + req := c.newRequest(op, input, &CreateConfigurationSetEventDestinationOutput{}) + return CreateConfigurationSetEventDestinationRequest{Request: req, Input: input, Copy: c.CreateConfigurationSetEventDestinationRequest} +} + +// CreateConfigurationSetEventDestinationRequest is the request type for the +// CreateConfigurationSetEventDestination API operation. 
+type CreateConfigurationSetEventDestinationRequest struct { + *aws.Request + Input *CreateConfigurationSetEventDestinationInput + Copy func(*CreateConfigurationSetEventDestinationInput) CreateConfigurationSetEventDestinationRequest +} + +// Send marshals and sends the CreateConfigurationSetEventDestination API request. +func (r CreateConfigurationSetEventDestinationRequest) Send(ctx context.Context) (*CreateConfigurationSetEventDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateConfigurationSetEventDestinationResponse{ + CreateConfigurationSetEventDestinationOutput: r.Request.Data.(*CreateConfigurationSetEventDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateConfigurationSetEventDestinationResponse is the response type for the +// CreateConfigurationSetEventDestination API operation. +type CreateConfigurationSetEventDestinationResponse struct { + *CreateConfigurationSetEventDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateConfigurationSetEventDestination request. +func (r *CreateConfigurationSetEventDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_CreateDedicatedIpPool.go b/service/sesv2/api_op_CreateDedicatedIpPool.go new file mode 100644 index 00000000000..943cb7d8e54 --- /dev/null +++ b/service/sesv2/api_op_CreateDedicatedIpPool.go @@ -0,0 +1,165 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to create a new dedicated IP pool. +type CreateDedicatedIpPoolInput struct { + _ struct{} `type:"structure"` + + // The name of the dedicated IP pool. + // + // PoolName is a required field + PoolName *string `type:"string" required:"true"` + + // An object that defines the tags (keys and values) that you want to associate + // with the pool. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s CreateDedicatedIpPoolInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDedicatedIpPoolInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDedicatedIpPoolInput"} + + if s.PoolName == nil { + invalidParams.Add(aws.NewErrParamRequired("PoolName")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDedicatedIpPoolInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.PoolName != nil { + v := *s.PoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type CreateDedicatedIpPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDedicatedIpPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateDedicatedIpPoolOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opCreateDedicatedIpPool = "CreateDedicatedIpPool" + +// CreateDedicatedIpPoolRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Create a new pool of dedicated IP addresses. A pool can include one or more +// dedicated IP addresses that are associated with your AWS account. You can +// associate a pool with a configuration set. When you send an email that uses +// that configuration set, the message is sent from one of the addresses in +// the associated pool. +// +// // Example sending a request using CreateDedicatedIpPoolRequest. +// req := client.CreateDedicatedIpPoolRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/CreateDedicatedIpPool +func (c *Client) CreateDedicatedIpPoolRequest(input *CreateDedicatedIpPoolInput) CreateDedicatedIpPoolRequest { + op := &aws.Operation{ + Name: opCreateDedicatedIpPool, + HTTPMethod: "POST", + HTTPPath: "/v2/email/dedicated-ip-pools", + } + + if input == nil { + input = &CreateDedicatedIpPoolInput{} + } + + req := c.newRequest(op, input, &CreateDedicatedIpPoolOutput{}) + return CreateDedicatedIpPoolRequest{Request: req, Input: input, Copy: c.CreateDedicatedIpPoolRequest} +} + +// CreateDedicatedIpPoolRequest is the request type for the +// CreateDedicatedIpPool API operation. +type CreateDedicatedIpPoolRequest struct { + *aws.Request + Input *CreateDedicatedIpPoolInput + Copy func(*CreateDedicatedIpPoolInput) CreateDedicatedIpPoolRequest +} + +// Send marshals and sends the CreateDedicatedIpPool API request. +func (r CreateDedicatedIpPoolRequest) Send(ctx context.Context) (*CreateDedicatedIpPoolResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDedicatedIpPoolResponse{ + CreateDedicatedIpPoolOutput: r.Request.Data.(*CreateDedicatedIpPoolOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDedicatedIpPoolResponse is the response type for the +// CreateDedicatedIpPool API operation. +type CreateDedicatedIpPoolResponse struct { + *CreateDedicatedIpPoolOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDedicatedIpPool request. 
+func (r *CreateDedicatedIpPoolResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_CreateDeliverabilityTestReport.go b/service/sesv2/api_op_CreateDeliverabilityTestReport.go new file mode 100644 index 00000000000..ad7ba062814 --- /dev/null +++ b/service/sesv2/api_op_CreateDeliverabilityTestReport.go @@ -0,0 +1,233 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to perform a predictive inbox placement test. Predictive inbox +// placement tests can help you predict how your messages will be handled by +// various email providers around the world. When you perform a predictive inbox +// placement test, you provide a sample message that contains the content that +// you plan to send to your customers. We send that message to special email +// addresses spread across several major email providers around the world. The +// test takes about 24 hours to complete. When the test is complete, you can +// use the GetDeliverabilityTestReport operation to view the results of the +// test. +type CreateDeliverabilityTestReportInput struct { + _ struct{} `type:"structure"` + + // The HTML body of the message that you sent when you performed the predictive + // inbox placement test. + // + // Content is a required field + Content *EmailContent `type:"structure" required:"true"` + + // The email address that the predictive inbox placement test email was sent + // from. + // + // FromEmailAddress is a required field + FromEmailAddress *string `type:"string" required:"true"` + + // A unique name that helps you to identify the predictive inbox placement test + // when you retrieve the results. + ReportName *string `type:"string"` + + // An array of objects that define the tags (keys and values) that you want + // to associate with the predictive inbox placement test. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s CreateDeliverabilityTestReportInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDeliverabilityTestReportInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateDeliverabilityTestReportInput"} + + if s.Content == nil { + invalidParams.Add(aws.NewErrParamRequired("Content")) + } + + if s.FromEmailAddress == nil { + invalidParams.Add(aws.NewErrParamRequired("FromEmailAddress")) + } + if s.Content != nil { + if err := s.Content.Validate(); err != nil { + invalidParams.AddNested("Content", err.(aws.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s CreateDeliverabilityTestReportInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Content != nil { + v := s.Content + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Content", v, metadata) + } + if s.FromEmailAddress != nil { + v := *s.FromEmailAddress + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FromEmailAddress", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReportName != nil { + v := *s.ReportName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReportName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// Information about the predictive inbox placement test that you created. +type CreateDeliverabilityTestReportOutput struct { + _ struct{} `type:"structure"` + + // The status of the predictive inbox placement test. If the status is IN_PROGRESS, + // then the predictive inbox placement test is currently running. Predictive + // inbox placement tests are usually complete within 24 hours of creating the + // test. If the status is COMPLETE, then the test is finished, and you can use + // the GetDeliverabilityTestReport to view the results of the test. + // + // DeliverabilityTestStatus is a required field + DeliverabilityTestStatus DeliverabilityTestStatus `type:"string" required:"true" enum:"true"` + + // A unique string that identifies the predictive inbox placement test. + // + // ReportId is a required field + ReportId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDeliverabilityTestReportOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateDeliverabilityTestReportOutput) MarshalFields(e protocol.FieldEncoder) error { + if len(s.DeliverabilityTestStatus) > 0 { + v := s.DeliverabilityTestStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DeliverabilityTestStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ReportId != nil { + v := *s.ReportId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReportId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opCreateDeliverabilityTestReport = "CreateDeliverabilityTestReport" + +// CreateDeliverabilityTestReportRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Create a new predictive inbox placement test. Predictive inbox placement +// tests can help you predict how your messages will be handled by various email +// providers around the world. When you perform a predictive inbox placement +// test, you provide a sample message that contains the content that you plan +// to send to your customers. Amazon SES API v2 then sends that message to special +// email addresses spread across several major email providers. After about +// 24 hours, the test is complete, and you can use the GetDeliverabilityTestReport +// operation to view the results of the test. 
+// +// // Example sending a request using CreateDeliverabilityTestReportRequest. +// req := client.CreateDeliverabilityTestReportRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/CreateDeliverabilityTestReport +func (c *Client) CreateDeliverabilityTestReportRequest(input *CreateDeliverabilityTestReportInput) CreateDeliverabilityTestReportRequest { + op := &aws.Operation{ + Name: opCreateDeliverabilityTestReport, + HTTPMethod: "POST", + HTTPPath: "/v2/email/deliverability-dashboard/test", + } + + if input == nil { + input = &CreateDeliverabilityTestReportInput{} + } + + req := c.newRequest(op, input, &CreateDeliverabilityTestReportOutput{}) + return CreateDeliverabilityTestReportRequest{Request: req, Input: input, Copy: c.CreateDeliverabilityTestReportRequest} +} + +// CreateDeliverabilityTestReportRequest is the request type for the +// CreateDeliverabilityTestReport API operation. +type CreateDeliverabilityTestReportRequest struct { + *aws.Request + Input *CreateDeliverabilityTestReportInput + Copy func(*CreateDeliverabilityTestReportInput) CreateDeliverabilityTestReportRequest +} + +// Send marshals and sends the CreateDeliverabilityTestReport API request. +func (r CreateDeliverabilityTestReportRequest) Send(ctx context.Context) (*CreateDeliverabilityTestReportResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateDeliverabilityTestReportResponse{ + CreateDeliverabilityTestReportOutput: r.Request.Data.(*CreateDeliverabilityTestReportOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateDeliverabilityTestReportResponse is the response type for the +// CreateDeliverabilityTestReport API operation. +type CreateDeliverabilityTestReportResponse struct { + *CreateDeliverabilityTestReportOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateDeliverabilityTestReport request. +func (r *CreateDeliverabilityTestReportResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_CreateEmailIdentity.go b/service/sesv2/api_op_CreateEmailIdentity.go new file mode 100644 index 00000000000..e6e241da0e2 --- /dev/null +++ b/service/sesv2/api_op_CreateEmailIdentity.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to begin the verification process for an email identity (an email +// address or domain). +type CreateEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email address or domain that you want to verify. + // + // EmailIdentity is a required field + EmailIdentity *string `type:"string" required:"true"` + + // An array of objects that define the tags (keys and values) that you want + // to associate with the email identity. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s CreateEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateEmailIdentityInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateEmailIdentityInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateEmailIdentityInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// If the email identity is a domain, this object contains tokens that you can +// use to create a set of CNAME records. To successfully verify your domain, +// you have to add these records to the DNS configuration for your domain. +// +// If the email identity is an email address, this object is empty. +type CreateEmailIdentityOutput struct { + _ struct{} `type:"structure"` + + // An object that contains information about the DKIM attributes for the identity. + // This object includes the tokens that you use to create the CNAME records + // that are required to complete the DKIM verification process. + DkimAttributes *DkimAttributes `type:"structure"` + + // The email identity type. + IdentityType IdentityType `type:"string" enum:"true"` + + // Specifies whether or not the identity is verified. You can only send email + // from verified email addresses or domains. For more information about verifying + // identities, see the Amazon Pinpoint User Guide (https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-email-manage-verify.html). + VerifiedForSendingStatus *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreateEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CreateEmailIdentityOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DkimAttributes != nil { + v := s.DkimAttributes + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DkimAttributes", v, metadata) + } + if len(s.IdentityType) > 0 { + v := s.IdentityType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IdentityType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.VerifiedForSendingStatus != nil { + v := *s.VerifiedForSendingStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VerifiedForSendingStatus", protocol.BoolValue(v), metadata) + } + return nil +} + +const opCreateEmailIdentity = "CreateEmailIdentity" + +// CreateEmailIdentityRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Starts the process of verifying an email identity. An identity is an email +// address or domain that you use when you send email.
Before you can use an +// identity to send email, you first have to verify it. By verifying an identity, +// you demonstrate that you're the owner of the identity, and that you've given +// Amazon SES API v2 permission to send email from the identity. +// +// When you verify an email address, Amazon SES sends an email to the address. +// Your email address is verified as soon as you follow the link in the verification +// email. +// +// When you verify a domain, this operation provides a set of DKIM tokens, which +// you can convert into CNAME tokens. You add these CNAME tokens to the DNS +// configuration for your domain. Your domain is verified when Amazon SES detects +// these records in the DNS configuration for your domain. For some DNS providers, +// it can take 72 hours or more to complete the domain verification process. +// +// // Example sending a request using CreateEmailIdentityRequest. +// req := client.CreateEmailIdentityRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/CreateEmailIdentity +func (c *Client) CreateEmailIdentityRequest(input *CreateEmailIdentityInput) CreateEmailIdentityRequest { + op := &aws.Operation{ + Name: opCreateEmailIdentity, + HTTPMethod: "POST", + HTTPPath: "/v2/email/identities", + } + + if input == nil { + input = &CreateEmailIdentityInput{} + } + + req := c.newRequest(op, input, &CreateEmailIdentityOutput{}) + return CreateEmailIdentityRequest{Request: req, Input: input, Copy: c.CreateEmailIdentityRequest} +} + +// CreateEmailIdentityRequest is the request type for the +// CreateEmailIdentity API operation. +type CreateEmailIdentityRequest struct { + *aws.Request + Input *CreateEmailIdentityInput + Copy func(*CreateEmailIdentityInput) CreateEmailIdentityRequest +} + +// Send marshals and sends the CreateEmailIdentity API request. +func (r CreateEmailIdentityRequest) Send(ctx context.Context) (*CreateEmailIdentityResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateEmailIdentityResponse{ + CreateEmailIdentityOutput: r.Request.Data.(*CreateEmailIdentityOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateEmailIdentityResponse is the response type for the +// CreateEmailIdentity API operation. +type CreateEmailIdentityResponse struct { + *CreateEmailIdentityOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateEmailIdentity request. +func (r *CreateEmailIdentityResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_DeleteConfigurationSet.go b/service/sesv2/api_op_DeleteConfigurationSet.go new file mode 100644 index 00000000000..fd1e04555c4 --- /dev/null +++ b/service/sesv2/api_op_DeleteConfigurationSet.go @@ -0,0 +1,143 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to delete a configuration set. +type DeleteConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to delete. 
+ // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteConfigurationSetInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteConfigurationSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type DeleteConfigurationSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteConfigurationSetOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteConfigurationSet = "DeleteConfigurationSet" + +// DeleteConfigurationSetRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Delete an existing configuration set. +// +// Configuration sets are groups of rules that you can apply to the emails you +// send. You apply a configuration set to an email by including a reference +// to the configuration set in the headers of the email. When you apply a configuration +// set to an email, all of the rules in that configuration set are applied to +// the email. +// +// // Example sending a request using DeleteConfigurationSetRequest. +// req := client.DeleteConfigurationSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/DeleteConfigurationSet +func (c *Client) DeleteConfigurationSetRequest(input *DeleteConfigurationSetInput) DeleteConfigurationSetRequest { + op := &aws.Operation{ + Name: opDeleteConfigurationSet, + HTTPMethod: "DELETE", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}", + } + + if input == nil { + input = &DeleteConfigurationSetInput{} + } + + req := c.newRequest(op, input, &DeleteConfigurationSetOutput{}) + return DeleteConfigurationSetRequest{Request: req, Input: input, Copy: c.DeleteConfigurationSetRequest} +} + +// DeleteConfigurationSetRequest is the request type for the +// DeleteConfigurationSet API operation. +type DeleteConfigurationSetRequest struct { + *aws.Request + Input *DeleteConfigurationSetInput + Copy func(*DeleteConfigurationSetInput) DeleteConfigurationSetRequest +} + +// Send marshals and sends the DeleteConfigurationSet API request. 
+func (r DeleteConfigurationSetRequest) Send(ctx context.Context) (*DeleteConfigurationSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteConfigurationSetResponse{ + DeleteConfigurationSetOutput: r.Request.Data.(*DeleteConfigurationSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteConfigurationSetResponse is the response type for the +// DeleteConfigurationSet API operation. +type DeleteConfigurationSetResponse struct { + *DeleteConfigurationSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteConfigurationSet request. +func (r *DeleteConfigurationSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_DeleteConfigurationSetEventDestination.go b/service/sesv2/api_op_DeleteConfigurationSetEventDestination.go new file mode 100644 index 00000000000..4b7fb83ed94 --- /dev/null +++ b/service/sesv2/api_op_DeleteConfigurationSetEventDestination.go @@ -0,0 +1,159 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to delete an event destination from a configuration set. +type DeleteConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that contains the event destination that + // you want to delete. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // The name of the event destination that you want to delete. + // + // EventDestinationName is a required field + EventDestinationName *string `location:"uri" locationName:"EventDestinationName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationSetEventDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteConfigurationSetEventDestinationInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if s.EventDestinationName == nil { + invalidParams.Add(aws.NewErrParamRequired("EventDestinationName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeleteConfigurationSetEventDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EventDestinationName != nil { + v := *s.EventDestinationName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EventDestinationName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type DeleteConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteConfigurationSetEventDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteConfigurationSetEventDestination = "DeleteConfigurationSetEventDestination" + +// DeleteConfigurationSetEventDestinationRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Delete an event destination. +// +// Events include message sends, deliveries, opens, clicks, bounces, and complaints. +// Event destinations are places that you can send information about these events +// to. For example, you can send event data to Amazon SNS to receive notifications +// when you receive bounces or complaints, or you can use Amazon Kinesis Data +// Firehose to stream data to Amazon S3 for long-term storage. +// +// // Example sending a request using DeleteConfigurationSetEventDestinationRequest. +// req := client.DeleteConfigurationSetEventDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/DeleteConfigurationSetEventDestination +func (c *Client) DeleteConfigurationSetEventDestinationRequest(input *DeleteConfigurationSetEventDestinationInput) DeleteConfigurationSetEventDestinationRequest { + op := &aws.Operation{ + Name: opDeleteConfigurationSetEventDestination, + HTTPMethod: "DELETE", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", + } + + if input == nil { + input = &DeleteConfigurationSetEventDestinationInput{} + } + + req := c.newRequest(op, input, &DeleteConfigurationSetEventDestinationOutput{}) + return DeleteConfigurationSetEventDestinationRequest{Request: req, Input: input, Copy: c.DeleteConfigurationSetEventDestinationRequest} +} + +// DeleteConfigurationSetEventDestinationRequest is the request type for the +// DeleteConfigurationSetEventDestination API operation. +type DeleteConfigurationSetEventDestinationRequest struct { + *aws.Request + Input *DeleteConfigurationSetEventDestinationInput + Copy func(*DeleteConfigurationSetEventDestinationInput) DeleteConfigurationSetEventDestinationRequest +} + +// Send marshals and sends the DeleteConfigurationSetEventDestination API request. 
+func (r DeleteConfigurationSetEventDestinationRequest) Send(ctx context.Context) (*DeleteConfigurationSetEventDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteConfigurationSetEventDestinationResponse{ + DeleteConfigurationSetEventDestinationOutput: r.Request.Data.(*DeleteConfigurationSetEventDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteConfigurationSetEventDestinationResponse is the response type for the +// DeleteConfigurationSetEventDestination API operation. +type DeleteConfigurationSetEventDestinationResponse struct { + *DeleteConfigurationSetEventDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteConfigurationSetEventDestination request. +func (r *DeleteConfigurationSetEventDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_DeleteDedicatedIpPool.go b/service/sesv2/api_op_DeleteDedicatedIpPool.go new file mode 100644 index 00000000000..649229f5c9f --- /dev/null +++ b/service/sesv2/api_op_DeleteDedicatedIpPool.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to delete a dedicated IP pool. +type DeleteDedicatedIpPoolInput struct { + _ struct{} `type:"structure"` + + // The name of the dedicated IP pool that you want to delete. + // + // PoolName is a required field + PoolName *string `location:"uri" locationName:"PoolName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDedicatedIpPoolInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDedicatedIpPoolInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteDedicatedIpPoolInput"} + + if s.PoolName == nil { + invalidParams.Add(aws.NewErrParamRequired("PoolName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDedicatedIpPoolInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.PoolName != nil { + v := *s.PoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "PoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type DeleteDedicatedIpPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDedicatedIpPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteDedicatedIpPoolOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteDedicatedIpPool = "DeleteDedicatedIpPool" + +// DeleteDedicatedIpPoolRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Delete a dedicated IP pool. 
+// +// // Example sending a request using DeleteDedicatedIpPoolRequest. +// req := client.DeleteDedicatedIpPoolRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/DeleteDedicatedIpPool +func (c *Client) DeleteDedicatedIpPoolRequest(input *DeleteDedicatedIpPoolInput) DeleteDedicatedIpPoolRequest { + op := &aws.Operation{ + Name: opDeleteDedicatedIpPool, + HTTPMethod: "DELETE", + HTTPPath: "/v2/email/dedicated-ip-pools/{PoolName}", + } + + if input == nil { + input = &DeleteDedicatedIpPoolInput{} + } + + req := c.newRequest(op, input, &DeleteDedicatedIpPoolOutput{}) + return DeleteDedicatedIpPoolRequest{Request: req, Input: input, Copy: c.DeleteDedicatedIpPoolRequest} +} + +// DeleteDedicatedIpPoolRequest is the request type for the +// DeleteDedicatedIpPool API operation. +type DeleteDedicatedIpPoolRequest struct { + *aws.Request + Input *DeleteDedicatedIpPoolInput + Copy func(*DeleteDedicatedIpPoolInput) DeleteDedicatedIpPoolRequest +} + +// Send marshals and sends the DeleteDedicatedIpPool API request. +func (r DeleteDedicatedIpPoolRequest) Send(ctx context.Context) (*DeleteDedicatedIpPoolResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteDedicatedIpPoolResponse{ + DeleteDedicatedIpPoolOutput: r.Request.Data.(*DeleteDedicatedIpPoolOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteDedicatedIpPoolResponse is the response type for the +// DeleteDedicatedIpPool API operation. +type DeleteDedicatedIpPoolResponse struct { + *DeleteDedicatedIpPoolOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteDedicatedIpPool request. +func (r *DeleteDedicatedIpPoolResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_DeleteEmailIdentity.go b/service/sesv2/api_op_DeleteEmailIdentity.go new file mode 100644 index 00000000000..79785460012 --- /dev/null +++ b/service/sesv2/api_op_DeleteEmailIdentity.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to delete an existing email identity. When you delete an identity, +// you lose the ability to send email from that identity. You can restore your +// ability to send email by completing the verification process for the identity +// again. +type DeleteEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity (that is, the email address or domain) that you want to delete. + // + // EmailIdentity is a required field + EmailIdentity *string `location:"uri" locationName:"EmailIdentity" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteEmailIdentityInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteEmailIdentityInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteEmailIdentityInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type DeleteEmailIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeleteEmailIdentityOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opDeleteEmailIdentity = "DeleteEmailIdentity" + +// DeleteEmailIdentityRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Deletes an email identity. An identity can be either an email address or +// a domain name. +// +// // Example sending a request using DeleteEmailIdentityRequest. +// req := client.DeleteEmailIdentityRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/DeleteEmailIdentity +func (c *Client) DeleteEmailIdentityRequest(input *DeleteEmailIdentityInput) DeleteEmailIdentityRequest { + op := &aws.Operation{ + Name: opDeleteEmailIdentity, + HTTPMethod: "DELETE", + HTTPPath: "/v2/email/identities/{EmailIdentity}", + } + + if input == nil { + input = &DeleteEmailIdentityInput{} + } + + req := c.newRequest(op, input, &DeleteEmailIdentityOutput{}) + return DeleteEmailIdentityRequest{Request: req, Input: input, Copy: c.DeleteEmailIdentityRequest} +} + +// DeleteEmailIdentityRequest is the request type for the +// DeleteEmailIdentity API operation. +type DeleteEmailIdentityRequest struct { + *aws.Request + Input *DeleteEmailIdentityInput + Copy func(*DeleteEmailIdentityInput) DeleteEmailIdentityRequest +} + +// Send marshals and sends the DeleteEmailIdentity API request. +func (r DeleteEmailIdentityRequest) Send(ctx context.Context) (*DeleteEmailIdentityResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteEmailIdentityResponse{ + DeleteEmailIdentityOutput: r.Request.Data.(*DeleteEmailIdentityOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteEmailIdentityResponse is the response type for the +// DeleteEmailIdentity API operation. +type DeleteEmailIdentityResponse struct { + *DeleteEmailIdentityOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteEmailIdentity request. 
+func (r *DeleteEmailIdentityResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetAccount.go b/service/sesv2/api_op_GetAccount.go new file mode 100644 index 00000000000..0338daebf1e --- /dev/null +++ b/service/sesv2/api_op_GetAccount.go @@ -0,0 +1,187 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain information about the email-sending capabilities of your +// Amazon SES account. +type GetAccountInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetAccountInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + return nil +} + +// A list of details about the email-sending capabilities of your Amazon SES +// account in the current AWS Region. +type GetAccountOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether or not the automatic warm-up feature is enabled for dedicated + // IP addresses that are associated with your account. + DedicatedIpAutoWarmupEnabled *bool `type:"boolean"` + + // The reputation status of your Amazon SES account. The status can be one of + // the following: + // + // * HEALTHY – There are no reputation-related issues that currently impact + // your account. + // + // * PROBATION – We've identified potential issues with your Amazon SES + // account. We're placing your account under review while you work on correcting + // these issues. + // + // * SHUTDOWN – Your account's ability to send email is currently paused + // because of an issue with the email sent from your account. When you correct + // the issue, you can contact us and request that your account's ability + // to send email is resumed. + EnforcementStatus *string `type:"string"` + + // Indicates whether or not your account has production access in the current + // AWS Region. + // + // If the value is false, then your account is in the sandbox. When your account + // is in the sandbox, you can only send email to verified identities. Additionally, + // the maximum number of emails you can send in a 24-hour period (your sending + // quota) is 200, and the maximum number of emails you can send per second (your + // maximum sending rate) is 1. + // + // If the value is true, then your account has production access. When your + // account has production access, you can send email to any address. The sending + // quota and maximum sending rate for your account vary based on your specific + // use case. + ProductionAccessEnabled *bool `type:"boolean"` + + // An object that contains information about the per-day and per-second sending + // limits for your Amazon SES account in the current AWS Region. + SendQuota *SendQuota `type:"structure"` + + // Indicates whether or not email sending is enabled for your Amazon SES account + // in the current AWS Region. 
+ SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetAccountOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetAccountOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DedicatedIpAutoWarmupEnabled != nil { + v := *s.DedicatedIpAutoWarmupEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DedicatedIpAutoWarmupEnabled", protocol.BoolValue(v), metadata) + } + if s.EnforcementStatus != nil { + v := *s.EnforcementStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "EnforcementStatus", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ProductionAccessEnabled != nil { + v := *s.ProductionAccessEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProductionAccessEnabled", protocol.BoolValue(v), metadata) + } + if s.SendQuota != nil { + v := s.SendQuota + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SendQuota", v, metadata) + } + if s.SendingEnabled != nil { + v := *s.SendingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +const opGetAccount = "GetAccount" + +// GetAccountRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Obtain information about the email-sending status and capabilities of your +// Amazon SES account in the current AWS Region. +// +// // Example sending a request using GetAccountRequest. +// req := client.GetAccountRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetAccount +func (c *Client) GetAccountRequest(input *GetAccountInput) GetAccountRequest { + op := &aws.Operation{ + Name: opGetAccount, + HTTPMethod: "GET", + HTTPPath: "/v2/email/account", + } + + if input == nil { + input = &GetAccountInput{} + } + + req := c.newRequest(op, input, &GetAccountOutput{}) + return GetAccountRequest{Request: req, Input: input, Copy: c.GetAccountRequest} +} + +// GetAccountRequest is the request type for the +// GetAccount API operation. +type GetAccountRequest struct { + *aws.Request + Input *GetAccountInput + Copy func(*GetAccountInput) GetAccountRequest +} + +// Send marshals and sends the GetAccount API request. +func (r GetAccountRequest) Send(ctx context.Context) (*GetAccountResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAccountResponse{ + GetAccountOutput: r.Request.Data.(*GetAccountOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAccountResponse is the response type for the +// GetAccount API operation. +type GetAccountResponse struct { + *GetAccountOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAccount request. +func (r *GetAccountResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetBlacklistReports.go b/service/sesv2/api_op_GetBlacklistReports.go new file mode 100644 index 00000000000..f6811e6c3db --- /dev/null +++ b/service/sesv2/api_op_GetBlacklistReports.go @@ -0,0 +1,169 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
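// A fuller end-to-end sketch of the request/Send pattern that the generated
// GetAccount operation above uses. The helper name checkSendingStatus and the
// use of the aws/external config loader are illustrative assumptions, not part
// of the generated API:
//
//     import (
//         "context"
//         "fmt"
//
//         "github.com/aws/aws-sdk-go-v2/aws/external"
//         "github.com/aws/aws-sdk-go-v2/service/sesv2"
//     )
//
//     // checkSendingStatus prints a few account-level flags returned by GetAccount.
//     func checkSendingStatus(ctx context.Context, svc *sesv2.Client) error {
//         resp, err := svc.GetAccountRequest(&sesv2.GetAccountInput{}).Send(ctx)
//         if err != nil {
//             return err
//         }
//         if resp.SendingEnabled != nil {
//             fmt.Println("sending enabled:", *resp.SendingEnabled)
//         }
//         if resp.ProductionAccessEnabled != nil {
//             fmt.Println("production access:", *resp.ProductionAccessEnabled)
//         }
//         return nil
//     }
//
//     // Typical wiring: load the default config and construct the SESv2 client.
//     cfg, err := external.LoadDefaultAWSConfig()
//     if err != nil {
//         panic(err)
//     }
//     _ = checkSendingStatus(context.TODO(), sesv2.New(cfg))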
+ +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to retrieve a list of the blacklists that your dedicated IP addresses +// appear on. +type GetBlacklistReportsInput struct { + _ struct{} `type:"structure"` + + // A list of IP addresses that you want to retrieve blacklist information about. + // You can only specify the dedicated IP addresses that you use to send email + // using Amazon SES or Amazon Pinpoint. + // + // BlacklistItemNames is a required field + BlacklistItemNames []string `location:"querystring" locationName:"BlacklistItemNames" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBlacklistReportsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBlacklistReportsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetBlacklistReportsInput"} + + if s.BlacklistItemNames == nil { + invalidParams.Add(aws.NewErrParamRequired("BlacklistItemNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetBlacklistReportsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.BlacklistItemNames != nil { + v := s.BlacklistItemNames + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "BlacklistItemNames", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// An object that contains information about blacklist events. +type GetBlacklistReportsOutput struct { + _ struct{} `type:"structure"` + + // An object that contains information about a blacklist that one of your dedicated + // IP addresses appears on. + // + // BlacklistReport is a required field + BlacklistReport map[string][]BlacklistEntry `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetBlacklistReportsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetBlacklistReportsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.BlacklistReport != nil { + v := s.BlacklistReport + + metadata := protocol.Metadata{} + ms0 := e.Map(protocol.BodyTarget, "BlacklistReport", metadata) + ms0.Start() + for k1, v1 := range v { + ls1 := ms0.List(k1) + ls1.Start() + for _, v2 := range v1 { + ls1.ListAddFields(v2) + } + ls1.End() + } + ms0.End() + + } + return nil +} + +const opGetBlacklistReports = "GetBlacklistReports" + +// GetBlacklistReportsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve a list of the blacklists that your dedicated IP addresses appear +// on. +// +// // Example sending a request using GetBlacklistReportsRequest. 
+// req := client.GetBlacklistReportsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetBlacklistReports +func (c *Client) GetBlacklistReportsRequest(input *GetBlacklistReportsInput) GetBlacklistReportsRequest { + op := &aws.Operation{ + Name: opGetBlacklistReports, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/blacklist-report", + } + + if input == nil { + input = &GetBlacklistReportsInput{} + } + + req := c.newRequest(op, input, &GetBlacklistReportsOutput{}) + return GetBlacklistReportsRequest{Request: req, Input: input, Copy: c.GetBlacklistReportsRequest} +} + +// GetBlacklistReportsRequest is the request type for the +// GetBlacklistReports API operation. +type GetBlacklistReportsRequest struct { + *aws.Request + Input *GetBlacklistReportsInput + Copy func(*GetBlacklistReportsInput) GetBlacklistReportsRequest +} + +// Send marshals and sends the GetBlacklistReports API request. +func (r GetBlacklistReportsRequest) Send(ctx context.Context) (*GetBlacklistReportsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetBlacklistReportsResponse{ + GetBlacklistReportsOutput: r.Request.Data.(*GetBlacklistReportsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetBlacklistReportsResponse is the response type for the +// GetBlacklistReports API operation. +type GetBlacklistReportsResponse struct { + *GetBlacklistReportsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetBlacklistReports request. +func (r *GetBlacklistReportsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetConfigurationSet.go b/service/sesv2/api_op_GetConfigurationSet.go new file mode 100644 index 00000000000..4c27c748aa1 --- /dev/null +++ b/service/sesv2/api_op_GetConfigurationSet.go @@ -0,0 +1,210 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain information about a configuration set. +type GetConfigurationSetInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to obtain more information + // about. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConfigurationSetInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConfigurationSetInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetConfigurationSetInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetConfigurationSetInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about a configuration set. +type GetConfigurationSetOutput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set. + ConfigurationSetName *string `type:"string"` + + // An object that defines the dedicated IP pool that is used to send emails + // that you send using the configuration set. + DeliveryOptions *DeliveryOptions `type:"structure"` + + // An object that defines whether or not Amazon SES collects reputation metrics + // for the emails that you send that use the configuration set. + ReputationOptions *ReputationOptions `type:"structure"` + + // An object that defines whether or not Amazon SES can send email that you + // send using the configuration set. + SendingOptions *SendingOptions `type:"structure"` + + // An array of objects that define the tags (keys and values) that are associated + // with the configuration set. + Tags []Tag `type:"list"` + + // An object that defines the open and click tracking options for emails that + // you send using the configuration set. + TrackingOptions *TrackingOptions `type:"structure"` +} + +// String returns the string representation +func (s GetConfigurationSetOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetConfigurationSetOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DeliveryOptions != nil { + v := s.DeliveryOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DeliveryOptions", v, metadata) + } + if s.ReputationOptions != nil { + v := s.ReputationOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "ReputationOptions", v, metadata) + } + if s.SendingOptions != nil { + v := s.SendingOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SendingOptions", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.TrackingOptions != nil { + v := s.TrackingOptions + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "TrackingOptions", v, metadata) + } + return nil +} + +const opGetConfigurationSet = "GetConfigurationSet" + +// GetConfigurationSetRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Get information about an existing configuration set, including the dedicated +// IP pool that it's associated with, whether or not it's enabled for sending +// email, and more. +// +// Configuration sets are groups of rules that you can apply to the emails you +// send. You apply a configuration set to an email by including a reference +// to the configuration set in the headers of the email. 
When you apply a configuration +// set to an email, all of the rules in that configuration set are applied to +// the email. +// +// // Example sending a request using GetConfigurationSetRequest. +// req := client.GetConfigurationSetRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetConfigurationSet +func (c *Client) GetConfigurationSetRequest(input *GetConfigurationSetInput) GetConfigurationSetRequest { + op := &aws.Operation{ + Name: opGetConfigurationSet, + HTTPMethod: "GET", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}", + } + + if input == nil { + input = &GetConfigurationSetInput{} + } + + req := c.newRequest(op, input, &GetConfigurationSetOutput{}) + return GetConfigurationSetRequest{Request: req, Input: input, Copy: c.GetConfigurationSetRequest} +} + +// GetConfigurationSetRequest is the request type for the +// GetConfigurationSet API operation. +type GetConfigurationSetRequest struct { + *aws.Request + Input *GetConfigurationSetInput + Copy func(*GetConfigurationSetInput) GetConfigurationSetRequest +} + +// Send marshals and sends the GetConfigurationSet API request. +func (r GetConfigurationSetRequest) Send(ctx context.Context) (*GetConfigurationSetResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetConfigurationSetResponse{ + GetConfigurationSetOutput: r.Request.Data.(*GetConfigurationSetOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetConfigurationSetResponse is the response type for the +// GetConfigurationSet API operation. +type GetConfigurationSetResponse struct { + *GetConfigurationSetOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetConfigurationSet request. +func (r *GetConfigurationSetResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetConfigurationSetEventDestinations.go b/service/sesv2/api_op_GetConfigurationSetEventDestinations.go new file mode 100644 index 00000000000..6f02f36817a --- /dev/null +++ b/service/sesv2/api_op_GetConfigurationSetEventDestinations.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain information about the event destinations for a configuration +// set. +type GetConfigurationSetEventDestinationsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that contains the event destination. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetConfigurationSetEventDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetConfigurationSetEventDestinationsInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GetConfigurationSetEventDestinationsInput"}
+
+ if s.ConfigurationSetName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s GetConfigurationSetEventDestinationsInput) MarshalFields(e protocol.FieldEncoder) error {
+ e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+ if s.ConfigurationSetName != nil {
+ v := *s.ConfigurationSetName
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+// Information about an event destination for a configuration set.
+type GetConfigurationSetEventDestinationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An array that includes all of the event destinations that have been configured
+ // for the configuration set.
+ EventDestinations []EventDestination `type:"list"`
+}
+
+// String returns the string representation
+func (s GetConfigurationSetEventDestinationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s GetConfigurationSetEventDestinationsOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.EventDestinations != nil {
+ v := s.EventDestinations
+
+ metadata := protocol.Metadata{}
+ ls0 := e.List(protocol.BodyTarget, "EventDestinations", metadata)
+ ls0.Start()
+ for _, v1 := range v {
+ ls0.ListAddFields(v1)
+ }
+ ls0.End()
+
+ }
+ return nil
+}
+
+const opGetConfigurationSetEventDestinations = "GetConfigurationSetEventDestinations"
+
+// GetConfigurationSetEventDestinationsRequest returns a request value for making API operation for
+// Amazon Simple Email Service.
+//
+// Retrieve a list of event destinations that are associated with a configuration
+// set.
+//
+// Events include message sends, deliveries, opens, clicks, bounces, and complaints.
+// Event destinations are places that you can send information about these events
+// to. For example, you can send event data to Amazon SNS to receive notifications
+// when you receive bounces or complaints, or you can use Amazon Kinesis Data
+// Firehose to stream data to Amazon S3 for long-term storage.
+//
+// // Example sending a request using GetConfigurationSetEventDestinationsRequest.
+// req := client.GetConfigurationSetEventDestinationsRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetConfigurationSetEventDestinations
+func (c *Client) GetConfigurationSetEventDestinationsRequest(input *GetConfigurationSetEventDestinationsInput) GetConfigurationSetEventDestinationsRequest {
+ op := &aws.Operation{
+ Name: opGetConfigurationSetEventDestinations,
+ HTTPMethod: "GET",
+ HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations",
+ }
+
+ if input == nil {
+ input = &GetConfigurationSetEventDestinationsInput{}
+ }
+
+ req := c.newRequest(op, input, &GetConfigurationSetEventDestinationsOutput{})
+ return GetConfigurationSetEventDestinationsRequest{Request: req, Input: input, Copy: c.GetConfigurationSetEventDestinationsRequest}
+}
+
+// GetConfigurationSetEventDestinationsRequest is the request type for the
+// GetConfigurationSetEventDestinations API operation.
+type GetConfigurationSetEventDestinationsRequest struct {
+ *aws.Request
+ Input *GetConfigurationSetEventDestinationsInput
+ Copy func(*GetConfigurationSetEventDestinationsInput) GetConfigurationSetEventDestinationsRequest
+}
+
+// Send marshals and sends the GetConfigurationSetEventDestinations API request.
+func (r GetConfigurationSetEventDestinationsRequest) Send(ctx context.Context) (*GetConfigurationSetEventDestinationsResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &GetConfigurationSetEventDestinationsResponse{
+ GetConfigurationSetEventDestinationsOutput: r.Request.Data.(*GetConfigurationSetEventDestinationsOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// GetConfigurationSetEventDestinationsResponse is the response type for the
+// GetConfigurationSetEventDestinations API operation.
+type GetConfigurationSetEventDestinationsResponse struct {
+ *GetConfigurationSetEventDestinationsOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetConfigurationSetEventDestinations request.
+func (r *GetConfigurationSetEventDestinationsResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/service/sesv2/api_op_GetDedicatedIp.go b/service/sesv2/api_op_GetDedicatedIp.go
new file mode 100644
index 00000000000..b6c8a5bead4
--- /dev/null
+++ b/service/sesv2/api_op_GetDedicatedIp.go
@@ -0,0 +1,149 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sesv2
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+)
+
+// A request to obtain more information about a dedicated IP address.
+type GetDedicatedIpInput struct {
+ _ struct{} `type:"structure"`
+
+ // The IP address that you want to obtain more information about. The value
+ // you specify has to be a dedicated IP address that's associated with your
+ // AWS account.
+ //
+ // Ip is a required field
+ Ip *string `location:"uri" locationName:"IP" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDedicatedIpInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDedicatedIpInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "GetDedicatedIpInput"}
+
+ if s.Ip == nil {
+ invalidParams.Add(aws.NewErrParamRequired("Ip"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s GetDedicatedIpInput) MarshalFields(e protocol.FieldEncoder) error {
+ e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
+
+ if s.Ip != nil {
+ v := *s.Ip
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.PathTarget, "IP", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+// Information about a dedicated IP address.
+type GetDedicatedIpOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An object that contains information about a dedicated IP address.
+ DedicatedIp *DedicatedIp `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetDedicatedIpOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
+func (s GetDedicatedIpOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.DedicatedIp != nil {
+ v := s.DedicatedIp
+
+ metadata := protocol.Metadata{}
+ e.SetFields(protocol.BodyTarget, "DedicatedIp", v, metadata)
+ }
+ return nil
+}
+
+const opGetDedicatedIp = "GetDedicatedIp"
+
+// GetDedicatedIpRequest returns a request value for making API operation for
+// Amazon Simple Email Service.
+//
+// Get information about a dedicated IP address, including the name of the dedicated
+// IP pool that it's associated with, as well as information about the automatic
+// warm-up process for the address.
+//
+// // Example sending a request using GetDedicatedIpRequest.
+// req := client.GetDedicatedIpRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDedicatedIp
+func (c *Client) GetDedicatedIpRequest(input *GetDedicatedIpInput) GetDedicatedIpRequest {
+ op := &aws.Operation{
+ Name: opGetDedicatedIp,
+ HTTPMethod: "GET",
+ HTTPPath: "/v2/email/dedicated-ips/{IP}",
+ }
+
+ if input == nil {
+ input = &GetDedicatedIpInput{}
+ }
+
+ req := c.newRequest(op, input, &GetDedicatedIpOutput{})
+ return GetDedicatedIpRequest{Request: req, Input: input, Copy: c.GetDedicatedIpRequest}
+}
+
+// GetDedicatedIpRequest is the request type for the
+// GetDedicatedIp API operation.
+type GetDedicatedIpRequest struct {
+ *aws.Request
+ Input *GetDedicatedIpInput
+ Copy func(*GetDedicatedIpInput) GetDedicatedIpRequest
+}
+
+// Send marshals and sends the GetDedicatedIp API request.
+func (r GetDedicatedIpRequest) Send(ctx context.Context) (*GetDedicatedIpResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &GetDedicatedIpResponse{
+ GetDedicatedIpOutput: r.Request.Data.(*GetDedicatedIpOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// GetDedicatedIpResponse is the response type for the
+// GetDedicatedIp API operation.
+type GetDedicatedIpResponse struct {
+ *GetDedicatedIpOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetDedicatedIp request.
+func (r *GetDedicatedIpResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetDedicatedIps.go b/service/sesv2/api_op_GetDedicatedIps.go new file mode 100644 index 00000000000..ab1428692ed --- /dev/null +++ b/service/sesv2/api_op_GetDedicatedIps.go @@ -0,0 +1,222 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain more information about dedicated IP pools. +type GetDedicatedIpsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to GetDedicatedIps to indicate the + // position of the dedicated IP pool in the list of IP pools. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The number of results to show in a single call to GetDedicatedIpsRequest. + // If the number of results is larger than the number you specified in this + // parameter, then the response includes a NextToken element, which you can + // use to obtain additional results. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` + + // The name of the IP pool that the dedicated IP address is associated with. + PoolName *string `location:"querystring" locationName:"PoolName" type:"string"` +} + +// String returns the string representation +func (s GetDedicatedIpsInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDedicatedIpsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + if s.PoolName != nil { + v := *s.PoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Information about the dedicated IP addresses that are associated with your +// AWS account. +type GetDedicatedIpsOutput struct { + _ struct{} `type:"structure"` + + // A list of dedicated IP addresses that are associated with your AWS account. + DedicatedIps []DedicatedIp `type:"list"` + + // A token that indicates that there are additional dedicated IP addresses to + // list. To view additional addresses, issue another request to GetDedicatedIps, + // passing this token in the NextToken parameter. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetDedicatedIpsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetDedicatedIpsOutput) MarshalFields(e protocol.FieldEncoder) error {
+ if s.DedicatedIps != nil {
+ v := s.DedicatedIps
+
+ metadata := protocol.Metadata{}
+ ls0 := e.List(protocol.BodyTarget, "DedicatedIps", metadata)
+ ls0.Start()
+ for _, v1 := range v {
+ ls0.ListAddFields(v1)
+ }
+ ls0.End()
+
+ }
+ if s.NextToken != nil {
+ v := *s.NextToken
+
+ metadata := protocol.Metadata{}
+ e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
+ }
+ return nil
+}
+
+const opGetDedicatedIps = "GetDedicatedIps"
+
+// GetDedicatedIpsRequest returns a request value for making API operation for
+// Amazon Simple Email Service.
+//
+// List the dedicated IP addresses that are associated with your AWS account.
+//
+// // Example sending a request using GetDedicatedIpsRequest.
+// req := client.GetDedicatedIpsRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDedicatedIps
+func (c *Client) GetDedicatedIpsRequest(input *GetDedicatedIpsInput) GetDedicatedIpsRequest {
+ op := &aws.Operation{
+ Name: opGetDedicatedIps,
+ HTTPMethod: "GET",
+ HTTPPath: "/v2/email/dedicated-ips",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "PageSize",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetDedicatedIpsInput{}
+ }
+
+ req := c.newRequest(op, input, &GetDedicatedIpsOutput{})
+ return GetDedicatedIpsRequest{Request: req, Input: input, Copy: c.GetDedicatedIpsRequest}
+}
+
+// GetDedicatedIpsRequest is the request type for the
+// GetDedicatedIps API operation.
+type GetDedicatedIpsRequest struct {
+ *aws.Request
+ Input *GetDedicatedIpsInput
+ Copy func(*GetDedicatedIpsInput) GetDedicatedIpsRequest
+}
+
+// Send marshals and sends the GetDedicatedIps API request.
+func (r GetDedicatedIpsRequest) Send(ctx context.Context) (*GetDedicatedIpsResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &GetDedicatedIpsResponse{
+ GetDedicatedIpsOutput: r.Request.Data.(*GetDedicatedIpsOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewGetDedicatedIpsPaginator returns a paginator for GetDedicatedIps.
+// Use Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false, if there are
+// no more pages, or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.GetDedicatedIpsRequest(input)
+// p := sesv2.NewGetDedicatedIpsPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewGetDedicatedIpsPaginator(req GetDedicatedIpsRequest) GetDedicatedIpsPaginator {
+ return GetDedicatedIpsPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *GetDedicatedIpsInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// GetDedicatedIpsPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type GetDedicatedIpsPaginator struct { + aws.Pager +} + +func (p *GetDedicatedIpsPaginator) CurrentPage() *GetDedicatedIpsOutput { + return p.Pager.CurrentPage().(*GetDedicatedIpsOutput) +} + +// GetDedicatedIpsResponse is the response type for the +// GetDedicatedIps API operation. +type GetDedicatedIpsResponse struct { + *GetDedicatedIpsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDedicatedIps request. +func (r *GetDedicatedIpsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go b/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go new file mode 100644 index 00000000000..9648ea09d66 --- /dev/null +++ b/service/sesv2/api_op_GetDeliverabilityDashboardOptions.go @@ -0,0 +1,199 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// Retrieve information about the status of the Deliverability dashboard for +// your AWS account. When the Deliverability dashboard is enabled, you gain +// access to reputation, deliverability, and other metrics for your domains. +// You also gain the ability to perform predictive inbox placement tests. +// +// When you use the Deliverability dashboard, you pay a monthly subscription +// charge, in addition to any other fees that you accrue by using Amazon SES +// and other AWS services. For more information about the features and cost +// of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing (http://aws.amazon.com/pinpoint/pricing/). +type GetDeliverabilityDashboardOptionsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetDeliverabilityDashboardOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDeliverabilityDashboardOptionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + return nil +} + +// An object that shows the status of the Deliverability dashboard. +type GetDeliverabilityDashboardOptionsOutput struct { + _ struct{} `type:"structure"` + + // The current status of your Deliverability dashboard subscription. If this + // value is PENDING_EXPIRATION, your subscription is scheduled to expire at + // the end of the current calendar month. + AccountStatus DeliverabilityDashboardAccountStatus `type:"string" enum:"true"` + + // An array of objects, one for each verified domain that you use to send email + // and currently has an active Deliverability dashboard subscription that isn’t + // scheduled to expire at the end of the current calendar month. + ActiveSubscribedDomains []DomainDeliverabilityTrackingOption `type:"list"` + + // Specifies whether the Deliverability dashboard is enabled. If this value + // is true, the dashboard is enabled. + // + // DashboardEnabled is a required field + DashboardEnabled *bool `type:"boolean" required:"true"` + + // An array of objects, one for each verified domain that you use to send email + // and currently has an active Deliverability dashboard subscription that's + // scheduled to expire at the end of the current calendar month. 
+ PendingExpirationSubscribedDomains []DomainDeliverabilityTrackingOption `type:"list"` + + // The date, in Unix time format, when your current subscription to the Deliverability + // dashboard is scheduled to expire, if your subscription is scheduled to expire + // at the end of the current calendar month. This value is null if you have + // an active subscription that isn’t due to expire at the end of the month. + SubscriptionExpiryDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s GetDeliverabilityDashboardOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDeliverabilityDashboardOptionsOutput) MarshalFields(e protocol.FieldEncoder) error { + if len(s.AccountStatus) > 0 { + v := s.AccountStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AccountStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ActiveSubscribedDomains != nil { + v := s.ActiveSubscribedDomains + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ActiveSubscribedDomains", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.DashboardEnabled != nil { + v := *s.DashboardEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardEnabled", protocol.BoolValue(v), metadata) + } + if s.PendingExpirationSubscribedDomains != nil { + v := s.PendingExpirationSubscribedDomains + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "PendingExpirationSubscribedDomains", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.SubscriptionExpiryDate != nil { + v := *s.SubscriptionExpiryDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SubscriptionExpiryDate", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + return nil +} + +const opGetDeliverabilityDashboardOptions = "GetDeliverabilityDashboardOptions" + +// GetDeliverabilityDashboardOptionsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve information about the status of the Deliverability dashboard for +// your account. When the Deliverability dashboard is enabled, you gain access +// to reputation, deliverability, and other metrics for the domains that you +// use to send email. You also gain the ability to perform predictive inbox +// placement tests. +// +// When you use the Deliverability dashboard, you pay a monthly subscription +// charge, in addition to any other fees that you accrue by using Amazon SES +// and other AWS services. For more information about the features and cost +// of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing (http://aws.amazon.com/pinpoint/pricing/). +// +// // Example sending a request using GetDeliverabilityDashboardOptionsRequest. 
+// req := client.GetDeliverabilityDashboardOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDeliverabilityDashboardOptions +func (c *Client) GetDeliverabilityDashboardOptionsRequest(input *GetDeliverabilityDashboardOptionsInput) GetDeliverabilityDashboardOptionsRequest { + op := &aws.Operation{ + Name: opGetDeliverabilityDashboardOptions, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard", + } + + if input == nil { + input = &GetDeliverabilityDashboardOptionsInput{} + } + + req := c.newRequest(op, input, &GetDeliverabilityDashboardOptionsOutput{}) + return GetDeliverabilityDashboardOptionsRequest{Request: req, Input: input, Copy: c.GetDeliverabilityDashboardOptionsRequest} +} + +// GetDeliverabilityDashboardOptionsRequest is the request type for the +// GetDeliverabilityDashboardOptions API operation. +type GetDeliverabilityDashboardOptionsRequest struct { + *aws.Request + Input *GetDeliverabilityDashboardOptionsInput + Copy func(*GetDeliverabilityDashboardOptionsInput) GetDeliverabilityDashboardOptionsRequest +} + +// Send marshals and sends the GetDeliverabilityDashboardOptions API request. +func (r GetDeliverabilityDashboardOptionsRequest) Send(ctx context.Context) (*GetDeliverabilityDashboardOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetDeliverabilityDashboardOptionsResponse{ + GetDeliverabilityDashboardOptionsOutput: r.Request.Data.(*GetDeliverabilityDashboardOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetDeliverabilityDashboardOptionsResponse is the response type for the +// GetDeliverabilityDashboardOptions API operation. +type GetDeliverabilityDashboardOptionsResponse struct { + *GetDeliverabilityDashboardOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDeliverabilityDashboardOptions request. +func (r *GetDeliverabilityDashboardOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetDeliverabilityTestReport.go b/service/sesv2/api_op_GetDeliverabilityTestReport.go new file mode 100644 index 00000000000..770f96c459a --- /dev/null +++ b/service/sesv2/api_op_GetDeliverabilityTestReport.go @@ -0,0 +1,204 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to retrieve the results of a predictive inbox placement test. +type GetDeliverabilityTestReportInput struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the predictive inbox placement test. + // + // ReportId is a required field + ReportId *string `location:"uri" locationName:"ReportId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDeliverabilityTestReportInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetDeliverabilityTestReportInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetDeliverabilityTestReportInput"} + + if s.ReportId == nil { + invalidParams.Add(aws.NewErrParamRequired("ReportId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDeliverabilityTestReportInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ReportId != nil { + v := *s.ReportId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ReportId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// The results of the predictive inbox placement test. +type GetDeliverabilityTestReportOutput struct { + _ struct{} `type:"structure"` + + // An object that contains the results of the predictive inbox placement test. + // + // DeliverabilityTestReport is a required field + DeliverabilityTestReport *DeliverabilityTestReport `type:"structure" required:"true"` + + // An object that describes how the test email was handled by several email + // providers, including Gmail, Hotmail, Yahoo, AOL, and others. + // + // IspPlacements is a required field + IspPlacements []IspPlacement `type:"list" required:"true"` + + // An object that contains the message that you sent when you performed this + // predictive inbox placement test. + Message *string `type:"string"` + + // An object that specifies how many test messages that were sent during the + // predictive inbox placement test were delivered to recipients' inboxes, how + // many were sent to recipients' spam folders, and how many weren't delivered. + // + // OverallPlacement is a required field + OverallPlacement *PlacementStatistics `type:"structure" required:"true"` + + // An array of objects that define the tags (keys and values) that are associated + // with the predictive inbox placement test. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s GetDeliverabilityTestReportOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetDeliverabilityTestReportOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DeliverabilityTestReport != nil { + v := s.DeliverabilityTestReport + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DeliverabilityTestReport", v, metadata) + } + if s.IspPlacements != nil { + v := s.IspPlacements + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "IspPlacements", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.Message != nil { + v := *s.Message + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Message", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.OverallPlacement != nil { + v := s.OverallPlacement + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OverallPlacement", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opGetDeliverabilityTestReport = "GetDeliverabilityTestReport" + +// GetDeliverabilityTestReportRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve the results of a predictive inbox placement test. +// +// // Example sending a request using GetDeliverabilityTestReportRequest. +// req := client.GetDeliverabilityTestReportRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDeliverabilityTestReport +func (c *Client) GetDeliverabilityTestReportRequest(input *GetDeliverabilityTestReportInput) GetDeliverabilityTestReportRequest { + op := &aws.Operation{ + Name: opGetDeliverabilityTestReport, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/test-reports/{ReportId}", + } + + if input == nil { + input = &GetDeliverabilityTestReportInput{} + } + + req := c.newRequest(op, input, &GetDeliverabilityTestReportOutput{}) + return GetDeliverabilityTestReportRequest{Request: req, Input: input, Copy: c.GetDeliverabilityTestReportRequest} +} + +// GetDeliverabilityTestReportRequest is the request type for the +// GetDeliverabilityTestReport API operation. +type GetDeliverabilityTestReportRequest struct { + *aws.Request + Input *GetDeliverabilityTestReportInput + Copy func(*GetDeliverabilityTestReportInput) GetDeliverabilityTestReportRequest +} + +// Send marshals and sends the GetDeliverabilityTestReport API request. +func (r GetDeliverabilityTestReportRequest) Send(ctx context.Context) (*GetDeliverabilityTestReportResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetDeliverabilityTestReportResponse{ + GetDeliverabilityTestReportOutput: r.Request.Data.(*GetDeliverabilityTestReportOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetDeliverabilityTestReportResponse is the response type for the +// GetDeliverabilityTestReport API operation. +type GetDeliverabilityTestReportResponse struct { + *GetDeliverabilityTestReportOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDeliverabilityTestReport request. 
+func (r *GetDeliverabilityTestReportResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetDomainDeliverabilityCampaign.go b/service/sesv2/api_op_GetDomainDeliverabilityCampaign.go new file mode 100644 index 00000000000..40c90a3c6e6 --- /dev/null +++ b/service/sesv2/api_op_GetDomainDeliverabilityCampaign.go @@ -0,0 +1,155 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// Retrieve all the deliverability data for a specific campaign. This data is +// available for a campaign only if the campaign sent email by using a domain +// that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption +// operation). +type GetDomainDeliverabilityCampaignInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the campaign. The Deliverability dashboard automatically + // generates and assigns this identifier to a campaign. + // + // CampaignId is a required field + CampaignId *string `location:"uri" locationName:"CampaignId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDomainDeliverabilityCampaignInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDomainDeliverabilityCampaignInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetDomainDeliverabilityCampaignInput"} + + if s.CampaignId == nil { + invalidParams.Add(aws.NewErrParamRequired("CampaignId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDomainDeliverabilityCampaignInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CampaignId != nil { + v := *s.CampaignId + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "CampaignId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains all the deliverability data for a specific campaign. +// This data is available for a campaign only if the campaign sent email by +// using a domain that the Deliverability dashboard is enabled for. +type GetDomainDeliverabilityCampaignOutput struct { + _ struct{} `type:"structure"` + + // An object that contains the deliverability data for the campaign. + // + // DomainDeliverabilityCampaign is a required field + DomainDeliverabilityCampaign *DomainDeliverabilityCampaign `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetDomainDeliverabilityCampaignOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s GetDomainDeliverabilityCampaignOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DomainDeliverabilityCampaign != nil { + v := s.DomainDeliverabilityCampaign + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DomainDeliverabilityCampaign", v, metadata) + } + return nil +} + +const opGetDomainDeliverabilityCampaign = "GetDomainDeliverabilityCampaign" + +// GetDomainDeliverabilityCampaignRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve all the deliverability data for a specific campaign. This data is +// available for a campaign only if the campaign sent email by using a domain +// that the Deliverability dashboard is enabled for. +// +// // Example sending a request using GetDomainDeliverabilityCampaignRequest. +// req := client.GetDomainDeliverabilityCampaignRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDomainDeliverabilityCampaign +func (c *Client) GetDomainDeliverabilityCampaignRequest(input *GetDomainDeliverabilityCampaignInput) GetDomainDeliverabilityCampaignRequest { + op := &aws.Operation{ + Name: opGetDomainDeliverabilityCampaign, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/campaigns/{CampaignId}", + } + + if input == nil { + input = &GetDomainDeliverabilityCampaignInput{} + } + + req := c.newRequest(op, input, &GetDomainDeliverabilityCampaignOutput{}) + return GetDomainDeliverabilityCampaignRequest{Request: req, Input: input, Copy: c.GetDomainDeliverabilityCampaignRequest} +} + +// GetDomainDeliverabilityCampaignRequest is the request type for the +// GetDomainDeliverabilityCampaign API operation. +type GetDomainDeliverabilityCampaignRequest struct { + *aws.Request + Input *GetDomainDeliverabilityCampaignInput + Copy func(*GetDomainDeliverabilityCampaignInput) GetDomainDeliverabilityCampaignRequest +} + +// Send marshals and sends the GetDomainDeliverabilityCampaign API request. +func (r GetDomainDeliverabilityCampaignRequest) Send(ctx context.Context) (*GetDomainDeliverabilityCampaignResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetDomainDeliverabilityCampaignResponse{ + GetDomainDeliverabilityCampaignOutput: r.Request.Data.(*GetDomainDeliverabilityCampaignOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetDomainDeliverabilityCampaignResponse is the response type for the +// GetDomainDeliverabilityCampaign API operation. +type GetDomainDeliverabilityCampaignResponse struct { + *GetDomainDeliverabilityCampaignOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDomainDeliverabilityCampaign request. +func (r *GetDomainDeliverabilityCampaignResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetDomainStatisticsReport.go b/service/sesv2/api_op_GetDomainStatisticsReport.go new file mode 100644 index 00000000000..5970cb5fbf5 --- /dev/null +++ b/service/sesv2/api_op_GetDomainStatisticsReport.go @@ -0,0 +1,206 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
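// A short sketch of calling the GetDomainDeliverabilityCampaign operation above,
// including the client-side Validate check for the required CampaignId field.
// The variables svc (a *sesv2.Client) and campaignID (a string) are illustrative
// assumptions:
//
//     input := &sesv2.GetDomainDeliverabilityCampaignInput{
//         CampaignId: &campaignID,
//     }
//     // Validate returns an aws.ErrInvalidParams value when CampaignId is unset.
//     if err := input.Validate(); err != nil {
//         return err
//     }
//     resp, err := svc.GetDomainDeliverabilityCampaignRequest(input).Send(context.TODO())
//     if err != nil {
//         return err
//     }
//     fmt.Println(resp.DomainDeliverabilityCampaign)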
+ +package sesv2 + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain deliverability metrics for a domain. +type GetDomainStatisticsReportInput struct { + _ struct{} `type:"structure"` + + // The domain that you want to obtain deliverability metrics for. + // + // Domain is a required field + Domain *string `location:"uri" locationName:"Domain" type:"string" required:"true"` + + // The last day (in Unix time) that you want to obtain domain deliverability + // metrics for. The EndDate that you specify has to be less than or equal to + // 30 days after the StartDate. + // + // EndDate is a required field + EndDate *time.Time `location:"querystring" locationName:"EndDate" type:"timestamp" required:"true"` + + // The first day (in Unix time) that you want to obtain domain deliverability + // metrics for. + // + // StartDate is a required field + StartDate *time.Time `location:"querystring" locationName:"StartDate" type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s GetDomainStatisticsReportInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDomainStatisticsReportInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetDomainStatisticsReportInput"} + + if s.Domain == nil { + invalidParams.Add(aws.NewErrParamRequired("Domain")) + } + + if s.EndDate == nil { + invalidParams.Add(aws.NewErrParamRequired("EndDate")) + } + + if s.StartDate == nil { + invalidParams.Add(aws.NewErrParamRequired("StartDate")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDomainStatisticsReportInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.Domain != nil { + v := *s.Domain + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "Domain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EndDate != nil { + v := *s.EndDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "EndDate", + protocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata) + } + if s.StartDate != nil { + v := *s.StartDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "StartDate", + protocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata) + } + return nil +} + +// An object that includes statistics that are related to the domain that you +// specified. +type GetDomainStatisticsReportOutput struct { + _ struct{} `type:"structure"` + + // An object that contains deliverability metrics for the domain that you specified. + // This object contains data for each day, starting on the StartDate and ending + // on the EndDate. + // + // DailyVolumes is a required field + DailyVolumes []DailyVolume `type:"list" required:"true"` + + // An object that contains deliverability metrics for the domain that you specified. + // The data in this object is a summary of all of the data that was collected + // from the StartDate to the EndDate. 
+ // + // OverallVolume is a required field + OverallVolume *OverallVolume `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetDomainStatisticsReportOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetDomainStatisticsReportOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DailyVolumes != nil { + v := s.DailyVolumes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DailyVolumes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.OverallVolume != nil { + v := s.OverallVolume + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "OverallVolume", v, metadata) + } + return nil +} + +const opGetDomainStatisticsReport = "GetDomainStatisticsReport" + +// GetDomainStatisticsReportRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve inbox placement and engagement rates for the domains that you use +// to send email. +// +// // Example sending a request using GetDomainStatisticsReportRequest. +// req := client.GetDomainStatisticsReportRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetDomainStatisticsReport +func (c *Client) GetDomainStatisticsReportRequest(input *GetDomainStatisticsReportInput) GetDomainStatisticsReportRequest { + op := &aws.Operation{ + Name: opGetDomainStatisticsReport, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/statistics-report/{Domain}", + } + + if input == nil { + input = &GetDomainStatisticsReportInput{} + } + + req := c.newRequest(op, input, &GetDomainStatisticsReportOutput{}) + return GetDomainStatisticsReportRequest{Request: req, Input: input, Copy: c.GetDomainStatisticsReportRequest} +} + +// GetDomainStatisticsReportRequest is the request type for the +// GetDomainStatisticsReport API operation. +type GetDomainStatisticsReportRequest struct { + *aws.Request + Input *GetDomainStatisticsReportInput + Copy func(*GetDomainStatisticsReportInput) GetDomainStatisticsReportRequest +} + +// Send marshals and sends the GetDomainStatisticsReport API request. +func (r GetDomainStatisticsReportRequest) Send(ctx context.Context) (*GetDomainStatisticsReportResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetDomainStatisticsReportResponse{ + GetDomainStatisticsReportOutput: r.Request.Data.(*GetDomainStatisticsReportOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetDomainStatisticsReportResponse is the response type for the +// GetDomainStatisticsReport API operation. +type GetDomainStatisticsReportResponse struct { + *GetDomainStatisticsReportOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetDomainStatisticsReport request. +func (r *GetDomainStatisticsReportResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_GetEmailIdentity.go b/service/sesv2/api_op_GetEmailIdentity.go new file mode 100644 index 00000000000..7243627f000 --- /dev/null +++ b/service/sesv2/api_op_GetEmailIdentity.go @@ -0,0 +1,213 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to return details about an email identity. +type GetEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email identity that you want to retrieve details for. + // + // EmailIdentity is a required field + EmailIdentity *string `location:"uri" locationName:"EmailIdentity" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetEmailIdentityInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetEmailIdentityInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetEmailIdentityInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Details about an email identity. +type GetEmailIdentityOutput struct { + _ struct{} `type:"structure"` + + // An object that contains information about the DKIM attributes for the identity. + // This object includes the tokens that you use to create the CNAME records + // that are required to complete the DKIM verification process. + DkimAttributes *DkimAttributes `type:"structure"` + + // The feedback forwarding configuration for the identity. + // + // If the value is true, you receive email notifications when bounce or complaint + // events occur. These notifications are sent to the address that you specified + // in the Return-Path header of the original email. + // + // You're required to have a method of tracking bounces and complaints. If you + // haven't set up another mechanism for receiving bounce or complaint notifications + // (for example, by setting up an event destination), you receive an email notification + // when these events occur (even if this setting is disabled). + FeedbackForwardingStatus *bool `type:"boolean"` + + // The email identity type. + IdentityType IdentityType `type:"string" enum:"true"` + + // An object that contains information about the Mail-From attributes for the + // email identity. + MailFromAttributes *MailFromAttributes `type:"structure"` + + // An array of objects that define the tags (keys and values) that are associated + // with the email identity. + Tags []Tag `type:"list"` + + // Specifies whether or not the identity is verified. You can only send email + // from verified email addresses or domains. For more information about verifying + // identities, see the Amazon Pinpoint User Guide (https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-email-manage-verify.html). 
+ VerifiedForSendingStatus *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s GetEmailIdentityOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DkimAttributes != nil { + v := s.DkimAttributes + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "DkimAttributes", v, metadata) + } + if s.FeedbackForwardingStatus != nil { + v := *s.FeedbackForwardingStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FeedbackForwardingStatus", protocol.BoolValue(v), metadata) + } + if len(s.IdentityType) > 0 { + v := s.IdentityType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IdentityType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.MailFromAttributes != nil { + v := s.MailFromAttributes + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "MailFromAttributes", v, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.VerifiedForSendingStatus != nil { + v := *s.VerifiedForSendingStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "VerifiedForSendingStatus", protocol.BoolValue(v), metadata) + } + return nil +} + +const opGetEmailIdentity = "GetEmailIdentity" + +// GetEmailIdentityRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Provides information about a specific identity, including the identity's +// verification status, its DKIM authentication status, and its custom Mail-From +// settings. +// +// // Example sending a request using GetEmailIdentityRequest. +// req := client.GetEmailIdentityRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/GetEmailIdentity +func (c *Client) GetEmailIdentityRequest(input *GetEmailIdentityInput) GetEmailIdentityRequest { + op := &aws.Operation{ + Name: opGetEmailIdentity, + HTTPMethod: "GET", + HTTPPath: "/v2/email/identities/{EmailIdentity}", + } + + if input == nil { + input = &GetEmailIdentityInput{} + } + + req := c.newRequest(op, input, &GetEmailIdentityOutput{}) + return GetEmailIdentityRequest{Request: req, Input: input, Copy: c.GetEmailIdentityRequest} +} + +// GetEmailIdentityRequest is the request type for the +// GetEmailIdentity API operation. +type GetEmailIdentityRequest struct { + *aws.Request + Input *GetEmailIdentityInput + Copy func(*GetEmailIdentityInput) GetEmailIdentityRequest +} + +// Send marshals and sends the GetEmailIdentity API request. +func (r GetEmailIdentityRequest) Send(ctx context.Context) (*GetEmailIdentityResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetEmailIdentityResponse{ + GetEmailIdentityOutput: r.Request.Data.(*GetEmailIdentityOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetEmailIdentityResponse is the response type for the +// GetEmailIdentity API operation. 
+type GetEmailIdentityResponse struct { + *GetEmailIdentityOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetEmailIdentity request. +func (r *GetEmailIdentityResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListConfigurationSets.go b/service/sesv2/api_op_ListConfigurationSets.go new file mode 100644 index 00000000000..f8960ae58b2 --- /dev/null +++ b/service/sesv2/api_op_ListConfigurationSets.go @@ -0,0 +1,222 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain a list of configuration sets for your Amazon SES account +// in the current AWS Region. +type ListConfigurationSetsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListConfigurationSets to indicate + // the position in the list of configuration sets. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The number of results to show in a single call to ListConfigurationSets. + // If the number of results is larger than the number you specified in this + // parameter, then the response includes a NextToken element, which you can + // use to obtain additional results. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` +} + +// String returns the string representation +func (s ListConfigurationSetsInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListConfigurationSetsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + return nil +} + +// A list of configuration sets in your Amazon SES account in the current AWS +// Region. +type ListConfigurationSetsOutput struct { + _ struct{} `type:"structure"` + + // An array that contains all of the configuration sets in your Amazon SES account + // in the current AWS Region. + ConfigurationSets []string `type:"list"` + + // A token that indicates that there are additional configuration sets to list. + // To view additional configuration sets, issue another request to ListConfigurationSets, + // and pass this token in the NextToken parameter. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListConfigurationSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListConfigurationSetsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.ConfigurationSets != nil { + v := s.ConfigurationSets + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ConfigurationSets", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListConfigurationSets = "ListConfigurationSets" + +// ListConfigurationSetsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// List all of the configuration sets associated with your account in the current +// region. +// +// Configuration sets are groups of rules that you can apply to the emails you +// send. You apply a configuration set to an email by including a reference +// to the configuration set in the headers of the email. When you apply a configuration +// set to an email, all of the rules in that configuration set are applied to +// the email. +// +// // Example sending a request using ListConfigurationSetsRequest. +// req := client.ListConfigurationSetsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListConfigurationSets +func (c *Client) ListConfigurationSetsRequest(input *ListConfigurationSetsInput) ListConfigurationSetsRequest { + op := &aws.Operation{ + Name: opListConfigurationSets, + HTTPMethod: "GET", + HTTPPath: "/v2/email/configuration-sets", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListConfigurationSetsInput{} + } + + req := c.newRequest(op, input, &ListConfigurationSetsOutput{}) + return ListConfigurationSetsRequest{Request: req, Input: input, Copy: c.ListConfigurationSetsRequest} +} + +// ListConfigurationSetsRequest is the request type for the +// ListConfigurationSets API operation. +type ListConfigurationSetsRequest struct { + *aws.Request + Input *ListConfigurationSetsInput + Copy func(*ListConfigurationSetsInput) ListConfigurationSetsRequest +} + +// Send marshals and sends the ListConfigurationSets API request. +func (r ListConfigurationSetsRequest) Send(ctx context.Context) (*ListConfigurationSetsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListConfigurationSetsResponse{ + ListConfigurationSetsOutput: r.Request.Data.(*ListConfigurationSetsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListConfigurationSetsRequestPaginator returns a paginator for ListConfigurationSets. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListConfigurationSetsRequest(input) +// p := sesv2.NewListConfigurationSetsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListConfigurationSetsPaginator(req ListConfigurationSetsRequest) ListConfigurationSetsPaginator { + return ListConfigurationSetsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListConfigurationSetsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListConfigurationSetsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListConfigurationSetsPaginator struct { + aws.Pager +} + +func (p *ListConfigurationSetsPaginator) CurrentPage() *ListConfigurationSetsOutput { + return p.Pager.CurrentPage().(*ListConfigurationSetsOutput) +} + +// ListConfigurationSetsResponse is the response type for the +// ListConfigurationSets API operation. +type ListConfigurationSetsResponse struct { + *ListConfigurationSetsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListConfigurationSets request. +func (r *ListConfigurationSetsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListDedicatedIpPools.go b/service/sesv2/api_op_ListDedicatedIpPools.go new file mode 100644 index 00000000000..f255dd0fe73 --- /dev/null +++ b/service/sesv2/api_op_ListDedicatedIpPools.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to obtain a list of dedicated IP pools. +type ListDedicatedIpPoolsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListDedicatedIpPools to indicate + // the position in the list of dedicated IP pools. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The number of results to show in a single call to ListDedicatedIpPools. If + // the number of results is larger than the number you specified in this parameter, + // then the response includes a NextToken element, which you can use to obtain + // additional results. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` +} + +// String returns the string representation +func (s ListDedicatedIpPoolsInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDedicatedIpPoolsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + return nil +} + +// A list of dedicated IP pools. 
+type ListDedicatedIpPoolsOutput struct { + _ struct{} `type:"structure"` + + // A list of all of the dedicated IP pools that are associated with your AWS + // account in the current Region. + DedicatedIpPools []string `type:"list"` + + // A token that indicates that there are additional IP pools to list. To view + // additional IP pools, issue another request to ListDedicatedIpPools, passing + // this token in the NextToken parameter. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDedicatedIpPoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDedicatedIpPoolsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DedicatedIpPools != nil { + v := s.DedicatedIpPools + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DedicatedIpPools", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListDedicatedIpPools = "ListDedicatedIpPools" + +// ListDedicatedIpPoolsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// List all of the dedicated IP pools that exist in your AWS account in the +// current Region. +// +// // Example sending a request using ListDedicatedIpPoolsRequest. +// req := client.ListDedicatedIpPoolsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListDedicatedIpPools +func (c *Client) ListDedicatedIpPoolsRequest(input *ListDedicatedIpPoolsInput) ListDedicatedIpPoolsRequest { + op := &aws.Operation{ + Name: opListDedicatedIpPools, + HTTPMethod: "GET", + HTTPPath: "/v2/email/dedicated-ip-pools", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDedicatedIpPoolsInput{} + } + + req := c.newRequest(op, input, &ListDedicatedIpPoolsOutput{}) + return ListDedicatedIpPoolsRequest{Request: req, Input: input, Copy: c.ListDedicatedIpPoolsRequest} +} + +// ListDedicatedIpPoolsRequest is the request type for the +// ListDedicatedIpPools API operation. +type ListDedicatedIpPoolsRequest struct { + *aws.Request + Input *ListDedicatedIpPoolsInput + Copy func(*ListDedicatedIpPoolsInput) ListDedicatedIpPoolsRequest +} + +// Send marshals and sends the ListDedicatedIpPools API request. +func (r ListDedicatedIpPoolsRequest) Send(ctx context.Context) (*ListDedicatedIpPoolsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDedicatedIpPoolsResponse{ + ListDedicatedIpPoolsOutput: r.Request.Data.(*ListDedicatedIpPoolsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDedicatedIpPoolsRequestPaginator returns a paginator for ListDedicatedIpPools. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDedicatedIpPoolsRequest(input) +// p := sesv2.NewListDedicatedIpPoolsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDedicatedIpPoolsPaginator(req ListDedicatedIpPoolsRequest) ListDedicatedIpPoolsPaginator { + return ListDedicatedIpPoolsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDedicatedIpPoolsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDedicatedIpPoolsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDedicatedIpPoolsPaginator struct { + aws.Pager +} + +func (p *ListDedicatedIpPoolsPaginator) CurrentPage() *ListDedicatedIpPoolsOutput { + return p.Pager.CurrentPage().(*ListDedicatedIpPoolsOutput) +} + +// ListDedicatedIpPoolsResponse is the response type for the +// ListDedicatedIpPools API operation. +type ListDedicatedIpPoolsResponse struct { + *ListDedicatedIpPoolsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDedicatedIpPools request. +func (r *ListDedicatedIpPoolsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListDeliverabilityTestReports.go b/service/sesv2/api_op_ListDeliverabilityTestReports.go new file mode 100644 index 00000000000..8423c399733 --- /dev/null +++ b/service/sesv2/api_op_ListDeliverabilityTestReports.go @@ -0,0 +1,223 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to list all of the predictive inbox placement tests that you've +// performed. +type ListDeliverabilityTestReportsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListDeliverabilityTestReports to + // indicate the position in the list of predictive inbox placement tests. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The number of results to show in a single call to ListDeliverabilityTestReports. + // If the number of results is larger than the number you specified in this + // parameter, then the response includes a NextToken element, which you can + // use to obtain additional results. + // + // The value you specify has to be at least 0, and can be no more than 1000. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` +} + +// String returns the string representation +func (s ListDeliverabilityTestReportsInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListDeliverabilityTestReportsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + return nil +} + +// A list of the predictive inbox placement test reports that are available +// for your account, regardless of whether or not those tests are complete. +type ListDeliverabilityTestReportsOutput struct { + _ struct{} `type:"structure"` + + // An object that contains a lists of predictive inbox placement tests that + // you've performed. + // + // DeliverabilityTestReports is a required field + DeliverabilityTestReports []DeliverabilityTestReport `type:"list" required:"true"` + + // A token that indicates that there are additional predictive inbox placement + // tests to list. To view additional predictive inbox placement tests, issue + // another request to ListDeliverabilityTestReports, and pass this token in + // the NextToken parameter. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDeliverabilityTestReportsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDeliverabilityTestReportsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DeliverabilityTestReports != nil { + v := s.DeliverabilityTestReports + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DeliverabilityTestReports", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListDeliverabilityTestReports = "ListDeliverabilityTestReports" + +// ListDeliverabilityTestReportsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Show a list of the predictive inbox placement tests that you've performed, +// regardless of their statuses. For predictive inbox placement tests that are +// complete, you can use the GetDeliverabilityTestReport operation to view the +// results. +// +// // Example sending a request using ListDeliverabilityTestReportsRequest. 
+// req := client.ListDeliverabilityTestReportsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListDeliverabilityTestReports +func (c *Client) ListDeliverabilityTestReportsRequest(input *ListDeliverabilityTestReportsInput) ListDeliverabilityTestReportsRequest { + op := &aws.Operation{ + Name: opListDeliverabilityTestReports, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/test-reports", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDeliverabilityTestReportsInput{} + } + + req := c.newRequest(op, input, &ListDeliverabilityTestReportsOutput{}) + return ListDeliverabilityTestReportsRequest{Request: req, Input: input, Copy: c.ListDeliverabilityTestReportsRequest} +} + +// ListDeliverabilityTestReportsRequest is the request type for the +// ListDeliverabilityTestReports API operation. +type ListDeliverabilityTestReportsRequest struct { + *aws.Request + Input *ListDeliverabilityTestReportsInput + Copy func(*ListDeliverabilityTestReportsInput) ListDeliverabilityTestReportsRequest +} + +// Send marshals and sends the ListDeliverabilityTestReports API request. +func (r ListDeliverabilityTestReportsRequest) Send(ctx context.Context) (*ListDeliverabilityTestReportsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDeliverabilityTestReportsResponse{ + ListDeliverabilityTestReportsOutput: r.Request.Data.(*ListDeliverabilityTestReportsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDeliverabilityTestReportsRequestPaginator returns a paginator for ListDeliverabilityTestReports. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDeliverabilityTestReportsRequest(input) +// p := sesv2.NewListDeliverabilityTestReportsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDeliverabilityTestReportsPaginator(req ListDeliverabilityTestReportsRequest) ListDeliverabilityTestReportsPaginator { + return ListDeliverabilityTestReportsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDeliverabilityTestReportsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDeliverabilityTestReportsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListDeliverabilityTestReportsPaginator struct { + aws.Pager +} + +func (p *ListDeliverabilityTestReportsPaginator) CurrentPage() *ListDeliverabilityTestReportsOutput { + return p.Pager.CurrentPage().(*ListDeliverabilityTestReportsOutput) +} + +// ListDeliverabilityTestReportsResponse is the response type for the +// ListDeliverabilityTestReports API operation. 
+type ListDeliverabilityTestReportsResponse struct { + *ListDeliverabilityTestReportsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDeliverabilityTestReports request. +func (r *ListDeliverabilityTestReportsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go b/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go new file mode 100644 index 00000000000..f3af2966895 --- /dev/null +++ b/service/sesv2/api_op_ListDomainDeliverabilityCampaigns.go @@ -0,0 +1,284 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// Retrieve deliverability data for all the campaigns that used a specific domain +// to send email during a specified time range. This data is available for a +// domain only if you enabled the Deliverability dashboard. +type ListDomainDeliverabilityCampaignsInput struct { + _ struct{} `type:"structure"` + + // The last day, in Unix time format, that you want to obtain deliverability + // data for. This value has to be less than or equal to 30 days after the value + // of the StartDate parameter. + // + // EndDate is a required field + EndDate *time.Time `location:"querystring" locationName:"EndDate" type:"timestamp" required:"true"` + + // A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns + // operation. This token indicates the position of a campaign in the list of + // campaigns. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The maximum number of results to include in response to a single call to + // the ListDomainDeliverabilityCampaigns operation. If the number of results + // is larger than the number that you specify in this parameter, the response + // includes a NextToken element, which you can use to obtain additional results. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` + + // The first day, in Unix time format, that you want to obtain deliverability + // data for. + // + // StartDate is a required field + StartDate *time.Time `location:"querystring" locationName:"StartDate" type:"timestamp" required:"true"` + + // The domain to obtain deliverability data for. + // + // SubscribedDomain is a required field + SubscribedDomain *string `location:"uri" locationName:"SubscribedDomain" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDomainDeliverabilityCampaignsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDomainDeliverabilityCampaignsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListDomainDeliverabilityCampaignsInput"} + + if s.EndDate == nil { + invalidParams.Add(aws.NewErrParamRequired("EndDate")) + } + + if s.StartDate == nil { + invalidParams.Add(aws.NewErrParamRequired("StartDate")) + } + + if s.SubscribedDomain == nil { + invalidParams.Add(aws.NewErrParamRequired("SubscribedDomain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s ListDomainDeliverabilityCampaignsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.SubscribedDomain != nil { + v := *s.SubscribedDomain + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "SubscribedDomain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EndDate != nil { + v := *s.EndDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "EndDate", + protocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata) + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + if s.StartDate != nil { + v := *s.StartDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "StartDate", + protocol.TimeValue{V: v, Format: protocol.ISO8601TimeFormatName, QuotedFormatTime: false}, metadata) + } + return nil +} + +// An array of objects that provide deliverability data for all the campaigns +// that used a specific domain to send email during a specified time range. +// This data is available for a domain only if you enabled the Deliverability +// dashboard for the domain. +type ListDomainDeliverabilityCampaignsOutput struct { + _ struct{} `type:"structure"` + + // An array of responses, one for each campaign that used the domain to send + // email during the specified time range. + // + // DomainDeliverabilityCampaigns is a required field + DomainDeliverabilityCampaigns []DomainDeliverabilityCampaign `type:"list" required:"true"` + + // A token that’s returned from a previous call to the ListDomainDeliverabilityCampaigns + // operation. This token indicates the position of the campaign in the list + // of campaigns. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainDeliverabilityCampaignsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListDomainDeliverabilityCampaignsOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.DomainDeliverabilityCampaigns != nil { + v := s.DomainDeliverabilityCampaigns + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DomainDeliverabilityCampaigns", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListDomainDeliverabilityCampaigns = "ListDomainDeliverabilityCampaigns" + +// ListDomainDeliverabilityCampaignsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve deliverability data for all the campaigns that used a specific domain +// to send email during a specified time range. This data is available for a +// domain only if you enabled the Deliverability dashboard for the domain. +// +// // Example sending a request using ListDomainDeliverabilityCampaignsRequest. 
+// req := client.ListDomainDeliverabilityCampaignsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListDomainDeliverabilityCampaigns +func (c *Client) ListDomainDeliverabilityCampaignsRequest(input *ListDomainDeliverabilityCampaignsInput) ListDomainDeliverabilityCampaignsRequest { + op := &aws.Operation{ + Name: opListDomainDeliverabilityCampaigns, + HTTPMethod: "GET", + HTTPPath: "/v2/email/deliverability-dashboard/domains/{SubscribedDomain}/campaigns", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainDeliverabilityCampaignsInput{} + } + + req := c.newRequest(op, input, &ListDomainDeliverabilityCampaignsOutput{}) + return ListDomainDeliverabilityCampaignsRequest{Request: req, Input: input, Copy: c.ListDomainDeliverabilityCampaignsRequest} +} + +// ListDomainDeliverabilityCampaignsRequest is the request type for the +// ListDomainDeliverabilityCampaigns API operation. +type ListDomainDeliverabilityCampaignsRequest struct { + *aws.Request + Input *ListDomainDeliverabilityCampaignsInput + Copy func(*ListDomainDeliverabilityCampaignsInput) ListDomainDeliverabilityCampaignsRequest +} + +// Send marshals and sends the ListDomainDeliverabilityCampaigns API request. +func (r ListDomainDeliverabilityCampaignsRequest) Send(ctx context.Context) (*ListDomainDeliverabilityCampaignsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListDomainDeliverabilityCampaignsResponse{ + ListDomainDeliverabilityCampaignsOutput: r.Request.Data.(*ListDomainDeliverabilityCampaignsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListDomainDeliverabilityCampaignsRequestPaginator returns a paginator for ListDomainDeliverabilityCampaigns. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListDomainDeliverabilityCampaignsRequest(input) +// p := sesv2.NewListDomainDeliverabilityCampaignsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListDomainDeliverabilityCampaignsPaginator(req ListDomainDeliverabilityCampaignsRequest) ListDomainDeliverabilityCampaignsPaginator { + return ListDomainDeliverabilityCampaignsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListDomainDeliverabilityCampaignsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListDomainDeliverabilityCampaignsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
+type ListDomainDeliverabilityCampaignsPaginator struct { + aws.Pager +} + +func (p *ListDomainDeliverabilityCampaignsPaginator) CurrentPage() *ListDomainDeliverabilityCampaignsOutput { + return p.Pager.CurrentPage().(*ListDomainDeliverabilityCampaignsOutput) +} + +// ListDomainDeliverabilityCampaignsResponse is the response type for the +// ListDomainDeliverabilityCampaigns API operation. +type ListDomainDeliverabilityCampaignsResponse struct { + *ListDomainDeliverabilityCampaignsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListDomainDeliverabilityCampaigns request. +func (r *ListDomainDeliverabilityCampaignsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListEmailIdentities.go b/service/sesv2/api_op_ListEmailIdentities.go new file mode 100644 index 00000000000..27be8bd5780 --- /dev/null +++ b/service/sesv2/api_op_ListEmailIdentities.go @@ -0,0 +1,223 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to list all of the email identities associated with your AWS account. +// This list includes identities that you've already verified, identities that +// are unverified, and identities that were verified in the past, but are no +// longer verified. +type ListEmailIdentitiesInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListEmailIdentities to indicate + // the position in the list of identities. + NextToken *string `location:"querystring" locationName:"NextToken" type:"string"` + + // The number of results to show in a single call to ListEmailIdentities. If + // the number of results is larger than the number you specified in this parameter, + // then the response includes a NextToken element, which you can use to obtain + // additional results. + // + // The value you specify has to be at least 0, and can be no more than 1000. + PageSize *int64 `location:"querystring" locationName:"PageSize" type:"integer"` +} + +// String returns the string representation +func (s ListEmailIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListEmailIdentitiesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PageSize != nil { + v := *s.PageSize + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "PageSize", protocol.Int64Value(v), metadata) + } + return nil +} + +// A list of all of the identities that you've attempted to verify, regardless +// of whether or not those identities were successfully verified. +type ListEmailIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // An array that includes all of the email identities associated with your AWS + // account. + EmailIdentities []IdentityInfo `type:"list"` + + // A token that indicates that there are additional configuration sets to list. 
+ // To view additional configuration sets, issue another request to ListEmailIdentities, + // and pass this token in the NextToken parameter. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListEmailIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListEmailIdentitiesOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.EmailIdentities != nil { + v := s.EmailIdentities + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "EmailIdentities", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.NextToken != nil { + v := *s.NextToken + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "NextToken", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opListEmailIdentities = "ListEmailIdentities" + +// ListEmailIdentitiesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Returns a list of all of the email identities that are associated with your +// AWS account. An identity can be either an email address or a domain. This +// operation returns identities that are verified as well as those that aren't. +// This operation returns identities that are associated with Amazon SES and +// Amazon Pinpoint. +// +// // Example sending a request using ListEmailIdentitiesRequest. +// req := client.ListEmailIdentitiesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListEmailIdentities +func (c *Client) ListEmailIdentitiesRequest(input *ListEmailIdentitiesInput) ListEmailIdentitiesRequest { + op := &aws.Operation{ + Name: opListEmailIdentities, + HTTPMethod: "GET", + HTTPPath: "/v2/email/identities", + Paginator: &aws.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEmailIdentitiesInput{} + } + + req := c.newRequest(op, input, &ListEmailIdentitiesOutput{}) + return ListEmailIdentitiesRequest{Request: req, Input: input, Copy: c.ListEmailIdentitiesRequest} +} + +// ListEmailIdentitiesRequest is the request type for the +// ListEmailIdentities API operation. +type ListEmailIdentitiesRequest struct { + *aws.Request + Input *ListEmailIdentitiesInput + Copy func(*ListEmailIdentitiesInput) ListEmailIdentitiesRequest +} + +// Send marshals and sends the ListEmailIdentities API request. +func (r ListEmailIdentitiesRequest) Send(ctx context.Context) (*ListEmailIdentitiesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListEmailIdentitiesResponse{ + ListEmailIdentitiesOutput: r.Request.Data.(*ListEmailIdentitiesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListEmailIdentitiesRequestPaginator returns a paginator for ListEmailIdentities. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
+// req := client.ListEmailIdentitiesRequest(input) +// p := sesv2.NewListEmailIdentitiesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListEmailIdentitiesPaginator(req ListEmailIdentitiesRequest) ListEmailIdentitiesPaginator { + return ListEmailIdentitiesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListEmailIdentitiesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListEmailIdentitiesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListEmailIdentitiesPaginator struct { + aws.Pager +} + +func (p *ListEmailIdentitiesPaginator) CurrentPage() *ListEmailIdentitiesOutput { + return p.Pager.CurrentPage().(*ListEmailIdentitiesOutput) +} + +// ListEmailIdentitiesResponse is the response type for the +// ListEmailIdentities API operation. +type ListEmailIdentitiesResponse struct { + *ListEmailIdentitiesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListEmailIdentities request. +func (r *ListEmailIdentitiesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_ListTagsForResource.go b/service/sesv2/api_op_ListTagsForResource.go new file mode 100644 index 00000000000..88be5b14312 --- /dev/null +++ b/service/sesv2/api_op_ListTagsForResource.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to retrieve + // tag information for. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"ResourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListTagsForResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // An array that lists all the tags that are associated with the resource. 
Each + // tag consists of a required tag key (Key) and an associated tag value (Value) + // + // Tags is a required field + Tags []Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ListTagsForResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Retrieve a list of the tags (keys and values) that are associated with a +// specified resource. A tag is a label that you optionally define and associate +// with a resource. Each tag consists of a required tag key and an optional +// associated tag value. A tag key is a general label that acts as a category +// for more specific tag values. A tag value acts as a descriptor within a tag +// key. +// +// // Example sending a request using ListTagsForResourceRequest. +// req := client.ListTagsForResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListTagsForResource +func (c *Client) ListTagsForResourceRequest(input *ListTagsForResourceInput) ListTagsForResourceRequest { + op := &aws.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/v2/email/tags", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req := c.newRequest(op, input, &ListTagsForResourceOutput{}) + return ListTagsForResourceRequest{Request: req, Input: input, Copy: c.ListTagsForResourceRequest} +} + +// ListTagsForResourceRequest is the request type for the +// ListTagsForResource API operation. +type ListTagsForResourceRequest struct { + *aws.Request + Input *ListTagsForResourceInput + Copy func(*ListTagsForResourceInput) ListTagsForResourceRequest +} + +// Send marshals and sends the ListTagsForResource API request. +func (r ListTagsForResourceRequest) Send(ctx context.Context) (*ListTagsForResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListTagsForResourceResponse{ + ListTagsForResourceOutput: r.Request.Data.(*ListTagsForResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListTagsForResourceResponse is the response type for the +// ListTagsForResource API operation. +type ListTagsForResourceResponse struct { + *ListTagsForResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListTagsForResource request. +func (r *ListTagsForResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutAccountDedicatedIpWarmupAttributes.go b/service/sesv2/api_op_PutAccountDedicatedIpWarmupAttributes.go new file mode 100644 index 00000000000..9a0731d3711 --- /dev/null +++ b/service/sesv2/api_op_PutAccountDedicatedIpWarmupAttributes.go @@ -0,0 +1,124 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to enable or disable the automatic IP address warm-up feature. +type PutAccountDedicatedIpWarmupAttributesInput struct { + _ struct{} `type:"structure"` + + // Enables or disables the automatic warm-up feature for dedicated IP addresses + // that are associated with your Amazon SES account in the current AWS Region. + // Set to true to enable the automatic warm-up feature, or set to false to disable + // it. + AutoWarmupEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutAccountDedicatedIpWarmupAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutAccountDedicatedIpWarmupAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.AutoWarmupEnabled != nil { + v := *s.AutoWarmupEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "AutoWarmupEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutAccountDedicatedIpWarmupAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAccountDedicatedIpWarmupAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutAccountDedicatedIpWarmupAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutAccountDedicatedIpWarmupAttributes = "PutAccountDedicatedIpWarmupAttributes" + +// PutAccountDedicatedIpWarmupAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Enable or disable the automatic warm-up feature for dedicated IP addresses. +// +// // Example sending a request using PutAccountDedicatedIpWarmupAttributesRequest. +// req := client.PutAccountDedicatedIpWarmupAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutAccountDedicatedIpWarmupAttributes +func (c *Client) PutAccountDedicatedIpWarmupAttributesRequest(input *PutAccountDedicatedIpWarmupAttributesInput) PutAccountDedicatedIpWarmupAttributesRequest { + op := &aws.Operation{ + Name: opPutAccountDedicatedIpWarmupAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/account/dedicated-ips/warmup", + } + + if input == nil { + input = &PutAccountDedicatedIpWarmupAttributesInput{} + } + + req := c.newRequest(op, input, &PutAccountDedicatedIpWarmupAttributesOutput{}) + return PutAccountDedicatedIpWarmupAttributesRequest{Request: req, Input: input, Copy: c.PutAccountDedicatedIpWarmupAttributesRequest} +} + +// PutAccountDedicatedIpWarmupAttributesRequest is the request type for the +// PutAccountDedicatedIpWarmupAttributes API operation. 
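// Usage sketch (editorial, not generated code): enabling the account-level
// automatic warm-up toggle defined in this file. Assumes a *sesv2.Client and
// the imports from the earlier ListTagsForResource sketch.
func enableAutoWarmup(ctx context.Context, client *sesv2.Client) error {
	req := client.PutAccountDedicatedIpWarmupAttributesRequest(&sesv2.PutAccountDedicatedIpWarmupAttributesInput{
		AutoWarmupEnabled: aws.Bool(true), // false would disable automatic warm-up
	})
	_, err := req.Send(ctx)
	return err
}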
+type PutAccountDedicatedIpWarmupAttributesRequest struct { + *aws.Request + Input *PutAccountDedicatedIpWarmupAttributesInput + Copy func(*PutAccountDedicatedIpWarmupAttributesInput) PutAccountDedicatedIpWarmupAttributesRequest +} + +// Send marshals and sends the PutAccountDedicatedIpWarmupAttributes API request. +func (r PutAccountDedicatedIpWarmupAttributesRequest) Send(ctx context.Context) (*PutAccountDedicatedIpWarmupAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutAccountDedicatedIpWarmupAttributesResponse{ + PutAccountDedicatedIpWarmupAttributesOutput: r.Request.Data.(*PutAccountDedicatedIpWarmupAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutAccountDedicatedIpWarmupAttributesResponse is the response type for the +// PutAccountDedicatedIpWarmupAttributes API operation. +type PutAccountDedicatedIpWarmupAttributesResponse struct { + *PutAccountDedicatedIpWarmupAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutAccountDedicatedIpWarmupAttributes request. +func (r *PutAccountDedicatedIpWarmupAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutAccountSendingAttributes.go b/service/sesv2/api_op_PutAccountSendingAttributes.go new file mode 100644 index 00000000000..faa36381b44 --- /dev/null +++ b/service/sesv2/api_op_PutAccountSendingAttributes.go @@ -0,0 +1,125 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to change the ability of your account to send email. +type PutAccountSendingAttributesInput struct { + _ struct{} `type:"structure"` + + // Enables or disables your account's ability to send email. Set to true to + // enable email sending, or set to false to disable email sending. + // + // If AWS paused your account's ability to send email, you can't use this operation + // to resume your account's ability to send email. + SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutAccountSendingAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutAccountSendingAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.SendingEnabled != nil { + v := *s.SendingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutAccountSendingAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAccountSendingAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
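// Usage sketch (editorial): pausing email sending for the whole account via
// the PutAccountSendingAttributes operation in this file. Client construction
// is assumed as in the earlier sketch; note the generated doc's caveat that
// sending paused by AWS itself cannot be resumed this way.
func pauseAccountSending(ctx context.Context, client *sesv2.Client) error {
	req := client.PutAccountSendingAttributesRequest(&sesv2.PutAccountSendingAttributesInput{
		SendingEnabled: aws.Bool(false), // set true to resume sending
	})
	_, err := req.Send(ctx)
	return err
}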
+func (s PutAccountSendingAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutAccountSendingAttributes = "PutAccountSendingAttributes" + +// PutAccountSendingAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Enable or disable the ability of your account to send email. +// +// // Example sending a request using PutAccountSendingAttributesRequest. +// req := client.PutAccountSendingAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutAccountSendingAttributes +func (c *Client) PutAccountSendingAttributesRequest(input *PutAccountSendingAttributesInput) PutAccountSendingAttributesRequest { + op := &aws.Operation{ + Name: opPutAccountSendingAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/account/sending", + } + + if input == nil { + input = &PutAccountSendingAttributesInput{} + } + + req := c.newRequest(op, input, &PutAccountSendingAttributesOutput{}) + return PutAccountSendingAttributesRequest{Request: req, Input: input, Copy: c.PutAccountSendingAttributesRequest} +} + +// PutAccountSendingAttributesRequest is the request type for the +// PutAccountSendingAttributes API operation. +type PutAccountSendingAttributesRequest struct { + *aws.Request + Input *PutAccountSendingAttributesInput + Copy func(*PutAccountSendingAttributesInput) PutAccountSendingAttributesRequest +} + +// Send marshals and sends the PutAccountSendingAttributes API request. +func (r PutAccountSendingAttributesRequest) Send(ctx context.Context) (*PutAccountSendingAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutAccountSendingAttributesResponse{ + PutAccountSendingAttributesOutput: r.Request.Data.(*PutAccountSendingAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutAccountSendingAttributesResponse is the response type for the +// PutAccountSendingAttributes API operation. +type PutAccountSendingAttributesResponse struct { + *PutAccountSendingAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutAccountSendingAttributes request. +func (r *PutAccountSendingAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutConfigurationSetDeliveryOptions.go b/service/sesv2/api_op_PutConfigurationSetDeliveryOptions.go new file mode 100644 index 00000000000..18e63478166 --- /dev/null +++ b/service/sesv2/api_op_PutConfigurationSetDeliveryOptions.go @@ -0,0 +1,162 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to associate a configuration set with a dedicated IP pool. +type PutConfigurationSetDeliveryOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to associate with a dedicated + // IP pool. 
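// Usage sketch (editorial) for the PutConfigurationSetDeliveryOptions
// operation defined in this file. The configuration-set and pool names are
// placeholders, and TlsPolicyRequire is assumed to be among the package's
// generated TlsPolicy enum constants.
func requireTLSDelivery(ctx context.Context, client *sesv2.Client) error {
	req := client.PutConfigurationSetDeliveryOptionsRequest(&sesv2.PutConfigurationSetDeliveryOptionsInput{
		ConfigurationSetName: aws.String("my-config-set"),
		SendingPoolName:      aws.String("my-dedicated-pool"),
		TlsPolicy:            sesv2.TlsPolicyRequire, // only deliver over TLS
	})
	_, err := req.Send(ctx)
	return err
}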
+ // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // The name of the dedicated IP pool that you want to associate with the configuration + // set. + SendingPoolName *string `type:"string"` + + // Specifies whether messages that use the configuration set are required to + // use Transport Layer Security (TLS). If the value is Require, messages are + // only delivered if a TLS connection can be established. If the value is Optional, + // messages can be delivered in plain text if a TLS connection can't be established. + TlsPolicy TlsPolicy `type:"string" enum:"true"` +} + +// String returns the string representation +func (s PutConfigurationSetDeliveryOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationSetDeliveryOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutConfigurationSetDeliveryOptionsInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetDeliveryOptionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.SendingPoolName != nil { + v := *s.SendingPoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingPoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.TlsPolicy) > 0 { + v := s.TlsPolicy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TlsPolicy", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutConfigurationSetDeliveryOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetDeliveryOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetDeliveryOptionsOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutConfigurationSetDeliveryOptions = "PutConfigurationSetDeliveryOptions" + +// PutConfigurationSetDeliveryOptionsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Associate a configuration set with a dedicated IP pool. You can use dedicated +// IP pools to create groups of dedicated IP addresses for sending specific +// types of email. +// +// // Example sending a request using PutConfigurationSetDeliveryOptionsRequest. 
+// req := client.PutConfigurationSetDeliveryOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutConfigurationSetDeliveryOptions +func (c *Client) PutConfigurationSetDeliveryOptionsRequest(input *PutConfigurationSetDeliveryOptionsInput) PutConfigurationSetDeliveryOptionsRequest { + op := &aws.Operation{ + Name: opPutConfigurationSetDeliveryOptions, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/delivery-options", + } + + if input == nil { + input = &PutConfigurationSetDeliveryOptionsInput{} + } + + req := c.newRequest(op, input, &PutConfigurationSetDeliveryOptionsOutput{}) + return PutConfigurationSetDeliveryOptionsRequest{Request: req, Input: input, Copy: c.PutConfigurationSetDeliveryOptionsRequest} +} + +// PutConfigurationSetDeliveryOptionsRequest is the request type for the +// PutConfigurationSetDeliveryOptions API operation. +type PutConfigurationSetDeliveryOptionsRequest struct { + *aws.Request + Input *PutConfigurationSetDeliveryOptionsInput + Copy func(*PutConfigurationSetDeliveryOptionsInput) PutConfigurationSetDeliveryOptionsRequest +} + +// Send marshals and sends the PutConfigurationSetDeliveryOptions API request. +func (r PutConfigurationSetDeliveryOptionsRequest) Send(ctx context.Context) (*PutConfigurationSetDeliveryOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutConfigurationSetDeliveryOptionsResponse{ + PutConfigurationSetDeliveryOptionsOutput: r.Request.Data.(*PutConfigurationSetDeliveryOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutConfigurationSetDeliveryOptionsResponse is the response type for the +// PutConfigurationSetDeliveryOptions API operation. +type PutConfigurationSetDeliveryOptionsResponse struct { + *PutConfigurationSetDeliveryOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutConfigurationSetDeliveryOptions request. +func (r *PutConfigurationSetDeliveryOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutConfigurationSetReputationOptions.go b/service/sesv2/api_op_PutConfigurationSetReputationOptions.go new file mode 100644 index 00000000000..9f78b0938bf --- /dev/null +++ b/service/sesv2/api_op_PutConfigurationSetReputationOptions.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to enable or disable tracking of reputation metrics for a configuration +// set. +type PutConfigurationSetReputationOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to enable or disable reputation + // metric tracking for. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // If true, tracking of reputation metrics is enabled for the configuration + // set. If false, tracking of reputation metrics is disabled for the configuration + // set. 
+ ReputationMetricsEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutConfigurationSetReputationOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationSetReputationOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutConfigurationSetReputationOptionsInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetReputationOptionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ReputationMetricsEnabled != nil { + v := *s.ReputationMetricsEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReputationMetricsEnabled", protocol.BoolValue(v), metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutConfigurationSetReputationOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetReputationOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetReputationOptionsOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutConfigurationSetReputationOptions = "PutConfigurationSetReputationOptions" + +// PutConfigurationSetReputationOptionsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Enable or disable collection of reputation metrics for emails that you send +// using a particular configuration set in a specific AWS Region. +// +// // Example sending a request using PutConfigurationSetReputationOptionsRequest. +// req := client.PutConfigurationSetReputationOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutConfigurationSetReputationOptions +func (c *Client) PutConfigurationSetReputationOptionsRequest(input *PutConfigurationSetReputationOptionsInput) PutConfigurationSetReputationOptionsRequest { + op := &aws.Operation{ + Name: opPutConfigurationSetReputationOptions, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/reputation-options", + } + + if input == nil { + input = &PutConfigurationSetReputationOptionsInput{} + } + + req := c.newRequest(op, input, &PutConfigurationSetReputationOptionsOutput{}) + return PutConfigurationSetReputationOptionsRequest{Request: req, Input: input, Copy: c.PutConfigurationSetReputationOptionsRequest} +} + +// PutConfigurationSetReputationOptionsRequest is the request type for the +// PutConfigurationSetReputationOptions API operation. 
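// Usage sketch (editorial): turning reputation-metric tracking on for one
// configuration set, as supported by the operation in this file. The set name
// is a placeholder; client setup is assumed as in the first sketch.
func enableReputationMetrics(ctx context.Context, client *sesv2.Client) error {
	req := client.PutConfigurationSetReputationOptionsRequest(&sesv2.PutConfigurationSetReputationOptionsInput{
		ConfigurationSetName:     aws.String("my-config-set"),
		ReputationMetricsEnabled: aws.Bool(true),
	})
	_, err := req.Send(ctx)
	return err
}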
+type PutConfigurationSetReputationOptionsRequest struct { + *aws.Request + Input *PutConfigurationSetReputationOptionsInput + Copy func(*PutConfigurationSetReputationOptionsInput) PutConfigurationSetReputationOptionsRequest +} + +// Send marshals and sends the PutConfigurationSetReputationOptions API request. +func (r PutConfigurationSetReputationOptionsRequest) Send(ctx context.Context) (*PutConfigurationSetReputationOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutConfigurationSetReputationOptionsResponse{ + PutConfigurationSetReputationOptionsOutput: r.Request.Data.(*PutConfigurationSetReputationOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutConfigurationSetReputationOptionsResponse is the response type for the +// PutConfigurationSetReputationOptions API operation. +type PutConfigurationSetReputationOptionsResponse struct { + *PutConfigurationSetReputationOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutConfigurationSetReputationOptions request. +func (r *PutConfigurationSetReputationOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutConfigurationSetSendingOptions.go b/service/sesv2/api_op_PutConfigurationSetSendingOptions.go new file mode 100644 index 00000000000..14421398327 --- /dev/null +++ b/service/sesv2/api_op_PutConfigurationSetSendingOptions.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to enable or disable the ability of Amazon SES to send emails that +// use a specific configuration set. +type PutConfigurationSetSendingOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to enable or disable email + // sending for. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // If true, email sending is enabled for the configuration set. If false, email + // sending is disabled for the configuration set. + SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutConfigurationSetSendingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationSetSendingOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutConfigurationSetSendingOptionsInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s PutConfigurationSetSendingOptionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.SendingEnabled != nil { + v := *s.SendingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingEnabled", protocol.BoolValue(v), metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutConfigurationSetSendingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetSendingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetSendingOptionsOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutConfigurationSetSendingOptions = "PutConfigurationSetSendingOptions" + +// PutConfigurationSetSendingOptionsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Enable or disable email sending for messages that use a particular configuration +// set in a specific AWS Region. +// +// // Example sending a request using PutConfigurationSetSendingOptionsRequest. +// req := client.PutConfigurationSetSendingOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutConfigurationSetSendingOptions +func (c *Client) PutConfigurationSetSendingOptionsRequest(input *PutConfigurationSetSendingOptionsInput) PutConfigurationSetSendingOptionsRequest { + op := &aws.Operation{ + Name: opPutConfigurationSetSendingOptions, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/sending", + } + + if input == nil { + input = &PutConfigurationSetSendingOptionsInput{} + } + + req := c.newRequest(op, input, &PutConfigurationSetSendingOptionsOutput{}) + return PutConfigurationSetSendingOptionsRequest{Request: req, Input: input, Copy: c.PutConfigurationSetSendingOptionsRequest} +} + +// PutConfigurationSetSendingOptionsRequest is the request type for the +// PutConfigurationSetSendingOptions API operation. +type PutConfigurationSetSendingOptionsRequest struct { + *aws.Request + Input *PutConfigurationSetSendingOptionsInput + Copy func(*PutConfigurationSetSendingOptionsInput) PutConfigurationSetSendingOptionsRequest +} + +// Send marshals and sends the PutConfigurationSetSendingOptions API request. +func (r PutConfigurationSetSendingOptionsRequest) Send(ctx context.Context) (*PutConfigurationSetSendingOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutConfigurationSetSendingOptionsResponse{ + PutConfigurationSetSendingOptionsOutput: r.Request.Data.(*PutConfigurationSetSendingOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutConfigurationSetSendingOptionsResponse is the response type for the +// PutConfigurationSetSendingOptions API operation. 
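// Usage sketch (editorial): disabling sending for a single configuration set
// with PutConfigurationSetSendingOptions (account-wide sending is untouched).
// Placeholder names; client setup as in the first sketch.
func disableConfigSetSending(ctx context.Context, client *sesv2.Client) error {
	req := client.PutConfigurationSetSendingOptionsRequest(&sesv2.PutConfigurationSetSendingOptionsInput{
		ConfigurationSetName: aws.String("my-config-set"),
		SendingEnabled:       aws.Bool(false),
	})
	_, err := req.Send(ctx)
	return err
}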
+type PutConfigurationSetSendingOptionsResponse struct { + *PutConfigurationSetSendingOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutConfigurationSetSendingOptions request. +func (r *PutConfigurationSetSendingOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutConfigurationSetTrackingOptions.go b/service/sesv2/api_op_PutConfigurationSetTrackingOptions.go new file mode 100644 index 00000000000..ba61b2f8731 --- /dev/null +++ b/service/sesv2/api_op_PutConfigurationSetTrackingOptions.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to add a custom domain for tracking open and click events to a +// configuration set. +type PutConfigurationSetTrackingOptionsInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to add a custom tracking + // domain to. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // The domain that you want to use to track open and click events. + CustomRedirectDomain *string `type:"string"` +} + +// String returns the string representation +func (s PutConfigurationSetTrackingOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutConfigurationSetTrackingOptionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutConfigurationSetTrackingOptionsInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutConfigurationSetTrackingOptionsInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.CustomRedirectDomain != nil { + v := *s.CustomRedirectDomain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CustomRedirectDomain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutConfigurationSetTrackingOptionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutConfigurationSetTrackingOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
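// Usage sketch (editorial): pointing a configuration set's open/click
// tracking at a custom redirect domain via the operation defined in this
// file. The domain and set name are placeholders.
func setCustomTrackingDomain(ctx context.Context, client *sesv2.Client) error {
	req := client.PutConfigurationSetTrackingOptionsRequest(&sesv2.PutConfigurationSetTrackingOptionsInput{
		ConfigurationSetName: aws.String("my-config-set"),
		CustomRedirectDomain: aws.String("tracking.example.com"),
	})
	_, err := req.Send(ctx)
	return err
}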
+func (s PutConfigurationSetTrackingOptionsOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutConfigurationSetTrackingOptions = "PutConfigurationSetTrackingOptions" + +// PutConfigurationSetTrackingOptionsRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Specify a custom domain to use for open and click tracking elements in email +// that you send. +// +// // Example sending a request using PutConfigurationSetTrackingOptionsRequest. +// req := client.PutConfigurationSetTrackingOptionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutConfigurationSetTrackingOptions +func (c *Client) PutConfigurationSetTrackingOptionsRequest(input *PutConfigurationSetTrackingOptionsInput) PutConfigurationSetTrackingOptionsRequest { + op := &aws.Operation{ + Name: opPutConfigurationSetTrackingOptions, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/tracking-options", + } + + if input == nil { + input = &PutConfigurationSetTrackingOptionsInput{} + } + + req := c.newRequest(op, input, &PutConfigurationSetTrackingOptionsOutput{}) + return PutConfigurationSetTrackingOptionsRequest{Request: req, Input: input, Copy: c.PutConfigurationSetTrackingOptionsRequest} +} + +// PutConfigurationSetTrackingOptionsRequest is the request type for the +// PutConfigurationSetTrackingOptions API operation. +type PutConfigurationSetTrackingOptionsRequest struct { + *aws.Request + Input *PutConfigurationSetTrackingOptionsInput + Copy func(*PutConfigurationSetTrackingOptionsInput) PutConfigurationSetTrackingOptionsRequest +} + +// Send marshals and sends the PutConfigurationSetTrackingOptions API request. +func (r PutConfigurationSetTrackingOptionsRequest) Send(ctx context.Context) (*PutConfigurationSetTrackingOptionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutConfigurationSetTrackingOptionsResponse{ + PutConfigurationSetTrackingOptionsOutput: r.Request.Data.(*PutConfigurationSetTrackingOptionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutConfigurationSetTrackingOptionsResponse is the response type for the +// PutConfigurationSetTrackingOptions API operation. +type PutConfigurationSetTrackingOptionsResponse struct { + *PutConfigurationSetTrackingOptionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutConfigurationSetTrackingOptions request. +func (r *PutConfigurationSetTrackingOptionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutDedicatedIpInPool.go b/service/sesv2/api_op_PutDedicatedIpInPool.go new file mode 100644 index 00000000000..4a63ed91871 --- /dev/null +++ b/service/sesv2/api_op_PutDedicatedIpInPool.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to move a dedicated IP address to a dedicated IP pool. +type PutDedicatedIpInPoolInput struct { + _ struct{} `type:"structure"` + + // The name of the IP pool that you want to add the dedicated IP address to. 
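// Usage sketch (editorial) for PutDedicatedIpInPool, defined in this file:
// both fields below are required (see Validate), and the IP and pool name
// shown are placeholders for values already associated with the account.
func moveIPToPool(ctx context.Context, client *sesv2.Client) error {
	req := client.PutDedicatedIpInPoolRequest(&sesv2.PutDedicatedIpInPoolInput{
		Ip:                  aws.String("192.0.2.10"),
		DestinationPoolName: aws.String("my-dedicated-pool"),
	})
	_, err := req.Send(ctx)
	return err
}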
+ // You have to specify an IP pool that already exists. + // + // DestinationPoolName is a required field + DestinationPoolName *string `type:"string" required:"true"` + + // The IP address that you want to move to the dedicated IP pool. The value + // you specify has to be a dedicated IP address that's associated with your + // AWS account. + // + // Ip is a required field + Ip *string `location:"uri" locationName:"IP" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutDedicatedIpInPoolInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDedicatedIpInPoolInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutDedicatedIpInPoolInput"} + + if s.DestinationPoolName == nil { + invalidParams.Add(aws.NewErrParamRequired("DestinationPoolName")) + } + + if s.Ip == nil { + invalidParams.Add(aws.NewErrParamRequired("Ip")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutDedicatedIpInPoolInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DestinationPoolName != nil { + v := *s.DestinationPoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DestinationPoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Ip != nil { + v := *s.Ip + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "IP", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutDedicatedIpInPoolOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDedicatedIpInPoolOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutDedicatedIpInPoolOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutDedicatedIpInPool = "PutDedicatedIpInPool" + +// PutDedicatedIpInPoolRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Move a dedicated IP address to an existing dedicated IP pool. +// +// The dedicated IP address that you specify must already exist, and must be +// associated with your AWS account. +// +// The dedicated IP pool you specify must already exist. You can create a new +// pool by using the CreateDedicatedIpPool operation. +// +// // Example sending a request using PutDedicatedIpInPoolRequest. 
+// req := client.PutDedicatedIpInPoolRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutDedicatedIpInPool +func (c *Client) PutDedicatedIpInPoolRequest(input *PutDedicatedIpInPoolInput) PutDedicatedIpInPoolRequest { + op := &aws.Operation{ + Name: opPutDedicatedIpInPool, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/dedicated-ips/{IP}/pool", + } + + if input == nil { + input = &PutDedicatedIpInPoolInput{} + } + + req := c.newRequest(op, input, &PutDedicatedIpInPoolOutput{}) + return PutDedicatedIpInPoolRequest{Request: req, Input: input, Copy: c.PutDedicatedIpInPoolRequest} +} + +// PutDedicatedIpInPoolRequest is the request type for the +// PutDedicatedIpInPool API operation. +type PutDedicatedIpInPoolRequest struct { + *aws.Request + Input *PutDedicatedIpInPoolInput + Copy func(*PutDedicatedIpInPoolInput) PutDedicatedIpInPoolRequest +} + +// Send marshals and sends the PutDedicatedIpInPool API request. +func (r PutDedicatedIpInPoolRequest) Send(ctx context.Context) (*PutDedicatedIpInPoolResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutDedicatedIpInPoolResponse{ + PutDedicatedIpInPoolOutput: r.Request.Data.(*PutDedicatedIpInPoolOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutDedicatedIpInPoolResponse is the response type for the +// PutDedicatedIpInPool API operation. +type PutDedicatedIpInPoolResponse struct { + *PutDedicatedIpInPoolOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutDedicatedIpInPool request. +func (r *PutDedicatedIpInPoolResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutDedicatedIpWarmupAttributes.go b/service/sesv2/api_op_PutDedicatedIpWarmupAttributes.go new file mode 100644 index 00000000000..9b23ca02940 --- /dev/null +++ b/service/sesv2/api_op_PutDedicatedIpWarmupAttributes.go @@ -0,0 +1,152 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to change the warm-up attributes for a dedicated IP address. This +// operation is useful when you want to resume the warm-up process for an existing +// IP address. +type PutDedicatedIpWarmupAttributesInput struct { + _ struct{} `type:"structure"` + + // The dedicated IP address that you want to update the warm-up attributes for. + // + // Ip is a required field + Ip *string `location:"uri" locationName:"IP" type:"string" required:"true"` + + // The warm-up percentage that you want to associate with the dedicated IP address. + // + // WarmupPercentage is a required field + WarmupPercentage *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s PutDedicatedIpWarmupAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutDedicatedIpWarmupAttributesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutDedicatedIpWarmupAttributesInput"} + + if s.Ip == nil { + invalidParams.Add(aws.NewErrParamRequired("Ip")) + } + + if s.WarmupPercentage == nil { + invalidParams.Add(aws.NewErrParamRequired("WarmupPercentage")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutDedicatedIpWarmupAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.WarmupPercentage != nil { + v := *s.WarmupPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "WarmupPercentage", protocol.Int64Value(v), metadata) + } + if s.Ip != nil { + v := *s.Ip + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "IP", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutDedicatedIpWarmupAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDedicatedIpWarmupAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutDedicatedIpWarmupAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutDedicatedIpWarmupAttributes = "PutDedicatedIpWarmupAttributes" + +// PutDedicatedIpWarmupAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// // Example sending a request using PutDedicatedIpWarmupAttributesRequest. +// req := client.PutDedicatedIpWarmupAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutDedicatedIpWarmupAttributes +func (c *Client) PutDedicatedIpWarmupAttributesRequest(input *PutDedicatedIpWarmupAttributesInput) PutDedicatedIpWarmupAttributesRequest { + op := &aws.Operation{ + Name: opPutDedicatedIpWarmupAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/dedicated-ips/{IP}/warmup", + } + + if input == nil { + input = &PutDedicatedIpWarmupAttributesInput{} + } + + req := c.newRequest(op, input, &PutDedicatedIpWarmupAttributesOutput{}) + return PutDedicatedIpWarmupAttributesRequest{Request: req, Input: input, Copy: c.PutDedicatedIpWarmupAttributesRequest} +} + +// PutDedicatedIpWarmupAttributesRequest is the request type for the +// PutDedicatedIpWarmupAttributes API operation. +type PutDedicatedIpWarmupAttributesRequest struct { + *aws.Request + Input *PutDedicatedIpWarmupAttributesInput + Copy func(*PutDedicatedIpWarmupAttributesInput) PutDedicatedIpWarmupAttributesRequest +} + +// Send marshals and sends the PutDedicatedIpWarmupAttributes API request. 
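// Usage sketch (editorial): resuming warm-up for one dedicated IP at a given
// percentage using the operation in this file. The IP and percentage are
// placeholders; client setup as in the first sketch.
func setWarmupPercentage(ctx context.Context, client *sesv2.Client) error {
	req := client.PutDedicatedIpWarmupAttributesRequest(&sesv2.PutDedicatedIpWarmupAttributesInput{
		Ip:               aws.String("192.0.2.10"),
		WarmupPercentage: aws.Int64(50), // both fields are required
	})
	_, err := req.Send(ctx)
	return err
}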
+func (r PutDedicatedIpWarmupAttributesRequest) Send(ctx context.Context) (*PutDedicatedIpWarmupAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutDedicatedIpWarmupAttributesResponse{ + PutDedicatedIpWarmupAttributesOutput: r.Request.Data.(*PutDedicatedIpWarmupAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutDedicatedIpWarmupAttributesResponse is the response type for the +// PutDedicatedIpWarmupAttributes API operation. +type PutDedicatedIpWarmupAttributesResponse struct { + *PutDedicatedIpWarmupAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutDedicatedIpWarmupAttributes request. +func (r *PutDedicatedIpWarmupAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutDeliverabilityDashboardOption.go b/service/sesv2/api_op_PutDeliverabilityDashboardOption.go new file mode 100644 index 00000000000..a275fca7e81 --- /dev/null +++ b/service/sesv2/api_op_PutDeliverabilityDashboardOption.go @@ -0,0 +1,169 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// Enable or disable the Deliverability dashboard. When you enable the Deliverability +// dashboard, you gain access to reputation, deliverability, and other metrics +// for the domains that you use to send email using Amazon SES API v2. You also +// gain the ability to perform predictive inbox placement tests. +// +// When you use the Deliverability dashboard, you pay a monthly subscription +// charge, in addition to any other fees that you accrue by using Amazon SES +// and other AWS services. For more information about the features and cost +// of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing (http://aws.amazon.com/pinpoint/pricing/). +type PutDeliverabilityDashboardOptionInput struct { + _ struct{} `type:"structure"` + + // Specifies whether to enable the Deliverability dashboard. To enable the dashboard, + // set this value to true. + // + // DashboardEnabled is a required field + DashboardEnabled *bool `type:"boolean" required:"true"` + + // An array of objects, one for each verified domain that you use to send email + // and enabled the Deliverability dashboard for. + SubscribedDomains []DomainDeliverabilityTrackingOption `type:"list"` +} + +// String returns the string representation +func (s PutDeliverabilityDashboardOptionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutDeliverabilityDashboardOptionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutDeliverabilityDashboardOptionInput"} + + if s.DashboardEnabled == nil { + invalidParams.Add(aws.NewErrParamRequired("DashboardEnabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s PutDeliverabilityDashboardOptionInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.DashboardEnabled != nil { + v := *s.DashboardEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DashboardEnabled", protocol.BoolValue(v), metadata) + } + if s.SubscribedDomains != nil { + v := s.SubscribedDomains + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "SubscribedDomains", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// A response that indicates whether the Deliverability dashboard is enabled. +type PutDeliverabilityDashboardOptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutDeliverabilityDashboardOptionOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutDeliverabilityDashboardOptionOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutDeliverabilityDashboardOption = "PutDeliverabilityDashboardOption" + +// PutDeliverabilityDashboardOptionRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Enable or disable the Deliverability dashboard. When you enable the Deliverability +// dashboard, you gain access to reputation, deliverability, and other metrics +// for the domains that you use to send email. You also gain the ability to +// perform predictive inbox placement tests. +// +// When you use the Deliverability dashboard, you pay a monthly subscription +// charge, in addition to any other fees that you accrue by using Amazon SES +// and other AWS services. For more information about the features and cost +// of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing (http://aws.amazon.com/pinpoint/pricing/). +// +// // Example sending a request using PutDeliverabilityDashboardOptionRequest. +// req := client.PutDeliverabilityDashboardOptionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutDeliverabilityDashboardOption +func (c *Client) PutDeliverabilityDashboardOptionRequest(input *PutDeliverabilityDashboardOptionInput) PutDeliverabilityDashboardOptionRequest { + op := &aws.Operation{ + Name: opPutDeliverabilityDashboardOption, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/deliverability-dashboard", + } + + if input == nil { + input = &PutDeliverabilityDashboardOptionInput{} + } + + req := c.newRequest(op, input, &PutDeliverabilityDashboardOptionOutput{}) + return PutDeliverabilityDashboardOptionRequest{Request: req, Input: input, Copy: c.PutDeliverabilityDashboardOptionRequest} +} + +// PutDeliverabilityDashboardOptionRequest is the request type for the +// PutDeliverabilityDashboardOption API operation. +type PutDeliverabilityDashboardOptionRequest struct { + *aws.Request + Input *PutDeliverabilityDashboardOptionInput + Copy func(*PutDeliverabilityDashboardOptionInput) PutDeliverabilityDashboardOptionRequest +} + +// Send marshals and sends the PutDeliverabilityDashboardOption API request. 
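// Usage sketch (editorial): enabling the Deliverability dashboard (a paid
// monthly subscription, per the generated doc above). SubscribedDomains is
// optional and omitted here; client setup as in the first sketch.
func enableDeliverabilityDashboard(ctx context.Context, client *sesv2.Client) error {
	req := client.PutDeliverabilityDashboardOptionRequest(&sesv2.PutDeliverabilityDashboardOptionInput{
		DashboardEnabled: aws.Bool(true), // required field
	})
	_, err := req.Send(ctx)
	return err
}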
+func (r PutDeliverabilityDashboardOptionRequest) Send(ctx context.Context) (*PutDeliverabilityDashboardOptionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutDeliverabilityDashboardOptionResponse{ + PutDeliverabilityDashboardOptionOutput: r.Request.Data.(*PutDeliverabilityDashboardOptionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutDeliverabilityDashboardOptionResponse is the response type for the +// PutDeliverabilityDashboardOption API operation. +type PutDeliverabilityDashboardOptionResponse struct { + *PutDeliverabilityDashboardOptionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutDeliverabilityDashboardOption request. +func (r *PutDeliverabilityDashboardOptionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutEmailIdentityDkimAttributes.go b/service/sesv2/api_op_PutEmailIdentityDkimAttributes.go new file mode 100644 index 00000000000..a7709c2de85 --- /dev/null +++ b/service/sesv2/api_op_PutEmailIdentityDkimAttributes.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to enable or disable DKIM signing of email that you send from an +// email identity. +type PutEmailIdentityDkimAttributesInput struct { + _ struct{} `type:"structure"` + + // The email identity that you want to change the DKIM settings for. + // + // EmailIdentity is a required field + EmailIdentity *string `location:"uri" locationName:"EmailIdentity" type:"string" required:"true"` + + // Sets the DKIM signing configuration for the identity. + // + // When you set this value true, then the messages that are sent from the identity + // are signed using DKIM. If you set this value to false, your messages are + // sent without DKIM signing. + SigningEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutEmailIdentityDkimAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEmailIdentityDkimAttributesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutEmailIdentityDkimAttributesInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutEmailIdentityDkimAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.SigningEnabled != nil { + v := *s.SigningEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SigningEnabled", protocol.BoolValue(v), metadata) + } + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. 
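// Usage sketch (editorial): turning DKIM signing on for a verified identity
// with PutEmailIdentityDkimAttributes. The identity is a placeholder.
func enableDKIMSigning(ctx context.Context, client *sesv2.Client) error {
	req := client.PutEmailIdentityDkimAttributesRequest(&sesv2.PutEmailIdentityDkimAttributesInput{
		EmailIdentity:  aws.String("example.com"),
		SigningEnabled: aws.Bool(true),
	})
	_, err := req.Send(ctx)
	return err
}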
+type PutEmailIdentityDkimAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutEmailIdentityDkimAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutEmailIdentityDkimAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutEmailIdentityDkimAttributes = "PutEmailIdentityDkimAttributes" + +// PutEmailIdentityDkimAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Used to enable or disable DKIM authentication for an email identity. +// +// // Example sending a request using PutEmailIdentityDkimAttributesRequest. +// req := client.PutEmailIdentityDkimAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutEmailIdentityDkimAttributes +func (c *Client) PutEmailIdentityDkimAttributesRequest(input *PutEmailIdentityDkimAttributesInput) PutEmailIdentityDkimAttributesRequest { + op := &aws.Operation{ + Name: opPutEmailIdentityDkimAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/identities/{EmailIdentity}/dkim", + } + + if input == nil { + input = &PutEmailIdentityDkimAttributesInput{} + } + + req := c.newRequest(op, input, &PutEmailIdentityDkimAttributesOutput{}) + return PutEmailIdentityDkimAttributesRequest{Request: req, Input: input, Copy: c.PutEmailIdentityDkimAttributesRequest} +} + +// PutEmailIdentityDkimAttributesRequest is the request type for the +// PutEmailIdentityDkimAttributes API operation. +type PutEmailIdentityDkimAttributesRequest struct { + *aws.Request + Input *PutEmailIdentityDkimAttributesInput + Copy func(*PutEmailIdentityDkimAttributesInput) PutEmailIdentityDkimAttributesRequest +} + +// Send marshals and sends the PutEmailIdentityDkimAttributes API request. +func (r PutEmailIdentityDkimAttributesRequest) Send(ctx context.Context) (*PutEmailIdentityDkimAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutEmailIdentityDkimAttributesResponse{ + PutEmailIdentityDkimAttributesOutput: r.Request.Data.(*PutEmailIdentityDkimAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutEmailIdentityDkimAttributesResponse is the response type for the +// PutEmailIdentityDkimAttributes API operation. +type PutEmailIdentityDkimAttributesResponse struct { + *PutEmailIdentityDkimAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutEmailIdentityDkimAttributes request. +func (r *PutEmailIdentityDkimAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutEmailIdentityFeedbackAttributes.go b/service/sesv2/api_op_PutEmailIdentityFeedbackAttributes.go new file mode 100644 index 00000000000..1d70bf439b4 --- /dev/null +++ b/service/sesv2/api_op_PutEmailIdentityFeedbackAttributes.go @@ -0,0 +1,168 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to set the attributes that control how bounce and complaint events +// are processed. +type PutEmailIdentityFeedbackAttributesInput struct { + _ struct{} `type:"structure"` + + // Sets the feedback forwarding configuration for the identity. + // + // If the value is true, you receive email notifications when bounce or complaint + // events occur. These notifications are sent to the address that you specified + // in the Return-Path header of the original email. + // + // You're required to have a method of tracking bounces and complaints. If you + // haven't set up another mechanism for receiving bounce or complaint notifications + // (for example, by setting up an event destination), you receive an email notification + // when these events occur (even if this setting is disabled). + EmailForwardingEnabled *bool `type:"boolean"` + + // The email identity that you want to configure bounce and complaint feedback + // forwarding for. + // + // EmailIdentity is a required field + EmailIdentity *string `location:"uri" locationName:"EmailIdentity" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutEmailIdentityFeedbackAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEmailIdentityFeedbackAttributesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutEmailIdentityFeedbackAttributesInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutEmailIdentityFeedbackAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EmailForwardingEnabled != nil { + v := *s.EmailForwardingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "EmailForwardingEnabled", protocol.BoolValue(v), metadata) + } + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutEmailIdentityFeedbackAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutEmailIdentityFeedbackAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutEmailIdentityFeedbackAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutEmailIdentityFeedbackAttributes = "PutEmailIdentityFeedbackAttributes" + +// PutEmailIdentityFeedbackAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Used to enable or disable feedback forwarding for an identity. This setting +// determines what happens when an identity is used to send an email that results +// in a bounce or complaint event. 
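// Usage sketch (editorial): disabling feedback forwarding for an identity
// with PutEmailIdentityFeedbackAttributes; as the generated doc notes, some
// other bounce/complaint mechanism (for example an event destination) should
// then be in place. The identity is a placeholder.
func disableFeedbackForwarding(ctx context.Context, client *sesv2.Client) error {
	req := client.PutEmailIdentityFeedbackAttributesRequest(&sesv2.PutEmailIdentityFeedbackAttributesInput{
		EmailIdentity:          aws.String("example.com"),
		EmailForwardingEnabled: aws.Bool(false),
	})
	_, err := req.Send(ctx)
	return err
}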
+// +// If the value is true, you receive email notifications when bounce or complaint +// events occur. These notifications are sent to the address that you specified +// in the Return-Path header of the original email. +// +// You're required to have a method of tracking bounces and complaints. If you +// haven't set up another mechanism for receiving bounce or complaint notifications +// (for example, by setting up an event destination), you receive an email notification +// when these events occur (even if this setting is disabled). +// +// // Example sending a request using PutEmailIdentityFeedbackAttributesRequest. +// req := client.PutEmailIdentityFeedbackAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutEmailIdentityFeedbackAttributes +func (c *Client) PutEmailIdentityFeedbackAttributesRequest(input *PutEmailIdentityFeedbackAttributesInput) PutEmailIdentityFeedbackAttributesRequest { + op := &aws.Operation{ + Name: opPutEmailIdentityFeedbackAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/identities/{EmailIdentity}/feedback", + } + + if input == nil { + input = &PutEmailIdentityFeedbackAttributesInput{} + } + + req := c.newRequest(op, input, &PutEmailIdentityFeedbackAttributesOutput{}) + return PutEmailIdentityFeedbackAttributesRequest{Request: req, Input: input, Copy: c.PutEmailIdentityFeedbackAttributesRequest} +} + +// PutEmailIdentityFeedbackAttributesRequest is the request type for the +// PutEmailIdentityFeedbackAttributes API operation. +type PutEmailIdentityFeedbackAttributesRequest struct { + *aws.Request + Input *PutEmailIdentityFeedbackAttributesInput + Copy func(*PutEmailIdentityFeedbackAttributesInput) PutEmailIdentityFeedbackAttributesRequest +} + +// Send marshals and sends the PutEmailIdentityFeedbackAttributes API request. +func (r PutEmailIdentityFeedbackAttributesRequest) Send(ctx context.Context) (*PutEmailIdentityFeedbackAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutEmailIdentityFeedbackAttributesResponse{ + PutEmailIdentityFeedbackAttributesOutput: r.Request.Data.(*PutEmailIdentityFeedbackAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutEmailIdentityFeedbackAttributesResponse is the response type for the +// PutEmailIdentityFeedbackAttributes API operation. +type PutEmailIdentityFeedbackAttributesResponse struct { + *PutEmailIdentityFeedbackAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutEmailIdentityFeedbackAttributes request. +func (r *PutEmailIdentityFeedbackAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go b/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go new file mode 100644 index 00000000000..e2927b61d76 --- /dev/null +++ b/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go @@ -0,0 +1,172 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to configure the custom MAIL FROM domain for a verified identity. 
+type PutEmailIdentityMailFromAttributesInput struct { + _ struct{} `type:"structure"` + + // The action that you want to take if the required MX record isn't found when + // you send an email. When you set this value to UseDefaultValue, the mail is + // sent using amazonses.com as the MAIL FROM domain. When you set this value + // to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified + // error, and doesn't attempt to deliver the email. + // + // These behaviors are taken when the custom MAIL FROM domain configuration + // is in the Pending, Failed, and TemporaryFailure states. + BehaviorOnMxFailure BehaviorOnMxFailure `type:"string" enum:"true"` + + // The verified email identity that you want to set up the custom MAIL FROM + // domain for. + // + // EmailIdentity is a required field + EmailIdentity *string `location:"uri" locationName:"EmailIdentity" type:"string" required:"true"` + + // The custom MAIL FROM domain that you want the verified identity to use. The + // MAIL FROM domain must meet the following criteria: + // + // * It has to be a subdomain of the verified identity. + // + // * It can't be used to receive email. + // + // * It can't be used in a "From" address if the MAIL FROM domain is a destination + // for feedback forwarding emails. + MailFromDomain *string `type:"string"` +} + +// String returns the string representation +func (s PutEmailIdentityMailFromAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutEmailIdentityMailFromAttributesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutEmailIdentityMailFromAttributesInput"} + + if s.EmailIdentity == nil { + invalidParams.Add(aws.NewErrParamRequired("EmailIdentity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PutEmailIdentityMailFromAttributesInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if len(s.BehaviorOnMxFailure) > 0 { + v := s.BehaviorOnMxFailure + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "BehaviorOnMxFailure", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.MailFromDomain != nil { + v := *s.MailFromDomain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MailFromDomain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EmailIdentity != nil { + v := *s.EmailIdentity + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EmailIdentity", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type PutEmailIdentityMailFromAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutEmailIdentityMailFromAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s PutEmailIdentityMailFromAttributesOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opPutEmailIdentityMailFromAttributes = "PutEmailIdentityMailFromAttributes" + +// PutEmailIdentityMailFromAttributesRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Used to enable or disable the custom Mail-From domain configuration for an +// email identity. +// +// // Example sending a request using PutEmailIdentityMailFromAttributesRequest. +// req := client.PutEmailIdentityMailFromAttributesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/PutEmailIdentityMailFromAttributes +func (c *Client) PutEmailIdentityMailFromAttributesRequest(input *PutEmailIdentityMailFromAttributesInput) PutEmailIdentityMailFromAttributesRequest { + op := &aws.Operation{ + Name: opPutEmailIdentityMailFromAttributes, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/identities/{EmailIdentity}/mail-from", + } + + if input == nil { + input = &PutEmailIdentityMailFromAttributesInput{} + } + + req := c.newRequest(op, input, &PutEmailIdentityMailFromAttributesOutput{}) + return PutEmailIdentityMailFromAttributesRequest{Request: req, Input: input, Copy: c.PutEmailIdentityMailFromAttributesRequest} +} + +// PutEmailIdentityMailFromAttributesRequest is the request type for the +// PutEmailIdentityMailFromAttributes API operation. +type PutEmailIdentityMailFromAttributesRequest struct { + *aws.Request + Input *PutEmailIdentityMailFromAttributesInput + Copy func(*PutEmailIdentityMailFromAttributesInput) PutEmailIdentityMailFromAttributesRequest +} + +// Send marshals and sends the PutEmailIdentityMailFromAttributes API request. +func (r PutEmailIdentityMailFromAttributesRequest) Send(ctx context.Context) (*PutEmailIdentityMailFromAttributesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutEmailIdentityMailFromAttributesResponse{ + PutEmailIdentityMailFromAttributesOutput: r.Request.Data.(*PutEmailIdentityMailFromAttributesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutEmailIdentityMailFromAttributesResponse is the response type for the +// PutEmailIdentityMailFromAttributes API operation. +type PutEmailIdentityMailFromAttributesResponse struct { + *PutEmailIdentityMailFromAttributesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutEmailIdentityMailFromAttributes request. +func (r *PutEmailIdentityMailFromAttributesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_SendEmail.go b/service/sesv2/api_op_SendEmail.go new file mode 100644 index 00000000000..a8108b0c2cd --- /dev/null +++ b/service/sesv2/api_op_SendEmail.go @@ -0,0 +1,251 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to send an email message. +type SendEmailInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that you want to use when sending the email. + ConfigurationSetName *string `type:"string"` + + // An object that contains the body of the message. 
You can send either a Simple + // message or a Raw message. + // + // Content is a required field + Content *EmailContent `type:"structure" required:"true"` + + // An object that contains the recipients of the email message. + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // A list of tags, in the form of name/value pairs, to apply to an email that + // you send using the SendEmail operation. Tags correspond to characteristics + // of the email that you define, so that you can publish email sending events. + EmailTags []MessageTag `type:"list"` + + // The address that you want bounce and complaint notifications to be sent to. + FeedbackForwardingEmailAddress *string `type:"string"` + + // The email address that you want to use as the "From" address for the email. + // The address that you specify has to be verified. + FromEmailAddress *string `type:"string"` + + // The "Reply-to" email addresses for the message. When the recipient replies + // to the message, each Reply-to address receives the reply. + ReplyToAddresses []string `type:"list"` +} + +// String returns the string representation +func (s SendEmailInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendEmailInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SendEmailInput"} + + if s.Content == nil { + invalidParams.Add(aws.NewErrParamRequired("Content")) + } + + if s.Destination == nil { + invalidParams.Add(aws.NewErrParamRequired("Destination")) + } + if s.Content != nil { + if err := s.Content.Validate(); err != nil { + invalidParams.AddNested("Content", err.(aws.ErrInvalidParams)) + } + } + if s.EmailTags != nil { + for i, v := range s.EmailTags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EmailTags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s SendEmailInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Content != nil { + v := s.Content + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Content", v, metadata) + } + if s.Destination != nil { + v := s.Destination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Destination", v, metadata) + } + if s.EmailTags != nil { + v := s.EmailTags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "EmailTags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.FeedbackForwardingEmailAddress != nil { + v := *s.FeedbackForwardingEmailAddress + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FeedbackForwardingEmailAddress", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.FromEmailAddress != nil { + v := *s.FromEmailAddress + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FromEmailAddress", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReplyToAddresses != nil { + v := s.ReplyToAddresses + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ReplyToAddresses", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// A unique message ID that you receive when an email is accepted for sending. +type SendEmailOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the message that is generated when the message is + // accepted. + // + // It is possible for the Amazon SES API v2 to accept a message without sending + // it. This can happen when the message that you're trying to send has an attachment + // contains a virus, or when you send a templated email that contains invalid + // personalization content, for example. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SendEmailOutput) MarshalFields(e protocol.FieldEncoder) error { + if s.MessageId != nil { + v := *s.MessageId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MessageId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Sends an email message. You can use the Amazon SES API v2 to send two types +// of messages: +// +// * Simple – A standard email message. When you create this type of message, +// you specify the sender, the recipient, and the message body, and the Amazon +// SES API v2 assembles the message for you. +// +// * Raw – A raw, MIME-formatted email message. When you send this type +// of email, you have to specify all of the message headers, as well as the +// message body. You can use this message type to send messages that contain +// attachments. 
The message that you specify has to be a valid MIME message. +// +// // Example sending a request using SendEmailRequest. +// req := client.SendEmailRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/SendEmail +func (c *Client) SendEmailRequest(input *SendEmailInput) SendEmailRequest { + op := &aws.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/v2/email/outbound-emails", + } + + if input == nil { + input = &SendEmailInput{} + } + + req := c.newRequest(op, input, &SendEmailOutput{}) + return SendEmailRequest{Request: req, Input: input, Copy: c.SendEmailRequest} +} + +// SendEmailRequest is the request type for the +// SendEmail API operation. +type SendEmailRequest struct { + *aws.Request + Input *SendEmailInput + Copy func(*SendEmailInput) SendEmailRequest +} + +// Send marshals and sends the SendEmail API request. +func (r SendEmailRequest) Send(ctx context.Context) (*SendEmailResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &SendEmailResponse{ + SendEmailOutput: r.Request.Data.(*SendEmailOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// SendEmailResponse is the response type for the +// SendEmail API operation. +type SendEmailResponse struct { + *SendEmailOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// SendEmail request. +func (r *SendEmailResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_TagResource.go b/service/sesv2/api_op_TagResource.go new file mode 100644 index 00000000000..79b04e4d142 --- /dev/null +++ b/service/sesv2/api_op_TagResource.go @@ -0,0 +1,176 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to add one or + // more tags to. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // A list of the tags that you want to add to the resource. A tag consists of + // a required tag key (Key) and an associated tag value (Value). The maximum + // length of a tag key is 128 characters. The maximum length of a tag value + // is 256 characters. + // + // Tags is a required field + Tags []Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.Tags == nil { + invalidParams.Add(aws.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Tags != nil { + v := s.Tags + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tags", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s TagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opTagResource = "TagResource" + +// TagResourceRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Add one or more tags (keys and values) to a specified resource. A tag is +// a label that you optionally define and associate with a resource. Tags can +// help you categorize and manage resources in different ways, such as by purpose, +// owner, environment, or other criteria. A resource can have as many as 50 +// tags. +// +// Each tag consists of a required tag key and an associated tag value, both +// of which you define. A tag key is a general label that acts as a category +// for more specific tag values. A tag value acts as a descriptor within a tag +// key. +// +// // Example sending a request using TagResourceRequest. +// req := client.TagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/TagResource +func (c *Client) TagResourceRequest(input *TagResourceInput) TagResourceRequest { + op := &aws.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v2/email/tags", + } + + if input == nil { + input = &TagResourceInput{} + } + + req := c.newRequest(op, input, &TagResourceOutput{}) + return TagResourceRequest{Request: req, Input: input, Copy: c.TagResourceRequest} +} + +// TagResourceRequest is the request type for the +// TagResource API operation. +type TagResourceRequest struct { + *aws.Request + Input *TagResourceInput + Copy func(*TagResourceInput) TagResourceRequest +} + +// Send marshals and sends the TagResource API request. 
+func (r TagResourceRequest) Send(ctx context.Context) (*TagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagResourceResponse{ + TagResourceOutput: r.Request.Data.(*TagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagResourceResponse is the response type for the +// TagResource API operation. +type TagResourceResponse struct { + *TagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagResource request. +func (r *TagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_UntagResource.go b/service/sesv2/api_op_UntagResource.go new file mode 100644 index 00000000000..b78b46db434 --- /dev/null +++ b/service/sesv2/api_op_UntagResource.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to remove one + // or more tags from. + // + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"ResourceArn" type:"string" required:"true"` + + // The tags (tag keys) that you want to remove from the resource. When you specify + // a tag key, the action removes both that key and its associated tag value. + // + // To remove more than one tag from the resource, append the TagKeys parameter + // and argument for each additional tag to remove, separated by an ampersand. + // For example: /v2/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2 + // + // TagKeys is a required field + TagKeys []string `location:"querystring" locationName:"TagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagResourceInput"} + + if s.ResourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceArn")) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UntagResourceInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.ResourceArn != nil { + v := *s.ResourceArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.QueryTarget, "ResourceArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TagKeys != nil { + v := s.TagKeys + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.QueryTarget, "TagKeys", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UntagResourceOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Remove one or more tags (keys and values) from a specified resource. +// +// // Example sending a request using UntagResourceRequest. +// req := client.UntagResourceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/UntagResource +func (c *Client) UntagResourceRequest(input *UntagResourceInput) UntagResourceRequest { + op := &aws.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v2/email/tags", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req := c.newRequest(op, input, &UntagResourceOutput{}) + return UntagResourceRequest{Request: req, Input: input, Copy: c.UntagResourceRequest} +} + +// UntagResourceRequest is the request type for the +// UntagResource API operation. +type UntagResourceRequest struct { + *aws.Request + Input *UntagResourceInput + Copy func(*UntagResourceInput) UntagResourceRequest +} + +// Send marshals and sends the UntagResource API request. +func (r UntagResourceRequest) Send(ctx context.Context) (*UntagResourceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagResourceResponse{ + UntagResourceOutput: r.Request.Data.(*UntagResourceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagResourceResponse is the response type for the +// UntagResource API operation. +type UntagResourceResponse struct { + *UntagResourceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagResource request. +func (r *UntagResourceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_op_UpdateConfigurationSetEventDestination.go b/service/sesv2/api_op_UpdateConfigurationSetEventDestination.go new file mode 100644 index 00000000000..b797fb85aaa --- /dev/null +++ b/service/sesv2/api_op_UpdateConfigurationSetEventDestination.go @@ -0,0 +1,180 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
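A short sketch of the TagResource and UntagResource operations defined above, assuming a placeholder configuration-set ARN and the Tag shape (Key and Value string pointers) declared in api_types.go:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("unable to load SDK config: %v", err)
	}
	client := sesv2.New(cfg)

	// Placeholder ARN used for illustration only.
	arn := "arn:aws:ses:us-east-1:123456789012:configuration-set/my-config-set"

	// Add a single tag to the resource.
	tagReq := client.TagResourceRequest(&sesv2.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags: []sesv2.Tag{
			{Key: aws.String("environment"), Value: aws.String("test")},
		},
	})
	if _, err := tagReq.Send(context.TODO()); err != nil {
		log.Fatalf("TagResource failed: %v", err)
	}

	// Remove the same tag by key. Each TagKeys entry becomes a repeated
	// query-string parameter, as described in the UntagResourceInput docs.
	untagReq := client.UntagResourceRequest(&sesv2.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     []string{"environment"},
	})
	if _, err := untagReq.Send(context.TODO()); err != nil {
		log.Fatalf("UntagResource failed: %v", err)
	}
}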
+ +package sesv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +// A request to change the settings for an event destination for a configuration +// set. +type UpdateConfigurationSetEventDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the configuration set that contains the event destination that + // you want to modify. + // + // ConfigurationSetName is a required field + ConfigurationSetName *string `location:"uri" locationName:"ConfigurationSetName" type:"string" required:"true"` + + // An object that defines the event destination. + // + // EventDestination is a required field + EventDestination *EventDestinationDefinition `type:"structure" required:"true"` + + // The name of the event destination that you want to modify. + // + // EventDestinationName is a required field + EventDestinationName *string `location:"uri" locationName:"EventDestinationName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationSetEventDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationSetEventDestinationInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateConfigurationSetEventDestinationInput"} + + if s.ConfigurationSetName == nil { + invalidParams.Add(aws.NewErrParamRequired("ConfigurationSetName")) + } + + if s.EventDestination == nil { + invalidParams.Add(aws.NewErrParamRequired("EventDestination")) + } + + if s.EventDestinationName == nil { + invalidParams.Add(aws.NewErrParamRequired("EventDestinationName")) + } + if s.EventDestination != nil { + if err := s.EventDestination.Validate(); err != nil { + invalidParams.AddNested("EventDestination", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s UpdateConfigurationSetEventDestinationInput) MarshalFields(e protocol.FieldEncoder) error { + e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{}) + + if s.EventDestination != nil { + v := s.EventDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "EventDestination", v, metadata) + } + if s.ConfigurationSetName != nil { + v := *s.ConfigurationSetName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "ConfigurationSetName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.EventDestinationName != nil { + v := *s.EventDestinationName + + metadata := protocol.Metadata{} + e.SetValue(protocol.PathTarget, "EventDestinationName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An HTTP 200 response if the request succeeds, or an error message if the +// request fails. +type UpdateConfigurationSetEventDestinationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationSetEventDestinationOutput) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s UpdateConfigurationSetEventDestinationOutput) MarshalFields(e protocol.FieldEncoder) error { + return nil +} + +const opUpdateConfigurationSetEventDestination = "UpdateConfigurationSetEventDestination" + +// UpdateConfigurationSetEventDestinationRequest returns a request value for making API operation for +// Amazon Simple Email Service. +// +// Update the configuration of an event destination for a configuration set. +// +// Events include message sends, deliveries, opens, clicks, bounces, and complaints. +// Event destinations are places that you can send information about these events +// to. For example, you can send event data to Amazon SNS to receive notifications +// when you receive bounces or complaints, or you can use Amazon Kinesis Data +// Firehose to stream data to Amazon S3 for long-term storage. +// +// // Example sending a request using UpdateConfigurationSetEventDestinationRequest. +// req := client.UpdateConfigurationSetEventDestinationRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/UpdateConfigurationSetEventDestination +func (c *Client) UpdateConfigurationSetEventDestinationRequest(input *UpdateConfigurationSetEventDestinationInput) UpdateConfigurationSetEventDestinationRequest { + op := &aws.Operation{ + Name: opUpdateConfigurationSetEventDestination, + HTTPMethod: "PUT", + HTTPPath: "/v2/email/configuration-sets/{ConfigurationSetName}/event-destinations/{EventDestinationName}", + } + + if input == nil { + input = &UpdateConfigurationSetEventDestinationInput{} + } + + req := c.newRequest(op, input, &UpdateConfigurationSetEventDestinationOutput{}) + return UpdateConfigurationSetEventDestinationRequest{Request: req, Input: input, Copy: c.UpdateConfigurationSetEventDestinationRequest} +} + +// UpdateConfigurationSetEventDestinationRequest is the request type for the +// UpdateConfigurationSetEventDestination API operation. +type UpdateConfigurationSetEventDestinationRequest struct { + *aws.Request + Input *UpdateConfigurationSetEventDestinationInput + Copy func(*UpdateConfigurationSetEventDestinationInput) UpdateConfigurationSetEventDestinationRequest +} + +// Send marshals and sends the UpdateConfigurationSetEventDestination API request. +func (r UpdateConfigurationSetEventDestinationRequest) Send(ctx context.Context) (*UpdateConfigurationSetEventDestinationResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateConfigurationSetEventDestinationResponse{ + UpdateConfigurationSetEventDestinationOutput: r.Request.Data.(*UpdateConfigurationSetEventDestinationOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateConfigurationSetEventDestinationResponse is the response type for the +// UpdateConfigurationSetEventDestination API operation. +type UpdateConfigurationSetEventDestinationResponse struct { + *UpdateConfigurationSetEventDestinationOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateConfigurationSetEventDestination request. 
+func (r *UpdateConfigurationSetEventDestinationResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/sesv2/api_types.go b/service/sesv2/api_types.go new file mode 100644 index 00000000000..fcbb23d2459 --- /dev/null +++ b/service/sesv2/api_types.go @@ -0,0 +1,2262 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sesv2 + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" +) + +var _ aws.Config +var _ = awsutil.Prettify + +// An object that contains information about a blacklisting event that impacts +// one of the dedicated IP addresses that is associated with your account. +type BlacklistEntry struct { + _ struct{} `type:"structure"` + + // Additional information about the blacklisting event, as provided by the blacklist + // maintainer. + Description *string `type:"string"` + + // The time when the blacklisting event occurred, shown in Unix time format. + ListingTime *time.Time `type:"timestamp"` + + // The name of the blacklist that the IP address appears on. + RblName *string `type:"string"` +} + +// String returns the string representation +func (s BlacklistEntry) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s BlacklistEntry) MarshalFields(e protocol.FieldEncoder) error { + if s.Description != nil { + v := *s.Description + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Description", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ListingTime != nil { + v := *s.ListingTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ListingTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.RblName != nil { + v := *s.RblName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "RblName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Represents the body of the email message. +type Body struct { + _ struct{} `type:"structure"` + + // An object that represents the version of the message that is displayed in + // email clients that support HTML. HTML messages can include formatted text, + // hyperlinks, images, and more. + Html *Content `type:"structure"` + + // An object that represents the version of the message that is displayed in + // email clients that don't support HTML, or clients where the recipient has + // disabled HTML rendering. + Text *Content `type:"structure"` +} + +// String returns the string representation +func (s Body) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Body) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Body"} + if s.Html != nil { + if err := s.Html.Validate(); err != nil { + invalidParams.AddNested("Html", err.(aws.ErrInvalidParams)) + } + } + if s.Text != nil { + if err := s.Text.Validate(); err != nil { + invalidParams.AddNested("Text", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Body) MarshalFields(e protocol.FieldEncoder) error { + if s.Html != nil { + v := s.Html + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Html", v, metadata) + } + if s.Text != nil { + v := s.Text + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Text", v, metadata) + } + return nil +} + +// An object that defines an Amazon CloudWatch destination for email events. +// You can use Amazon CloudWatch to monitor and gain insights on your email +// sending metrics. +type CloudWatchDestination struct { + _ struct{} `type:"structure"` + + // An array of objects that define the dimensions to use when you send email + // events to Amazon CloudWatch. + // + // DimensionConfigurations is a required field + DimensionConfigurations []CloudWatchDimensionConfiguration `type:"list" required:"true"` +} + +// String returns the string representation +func (s CloudWatchDestination) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchDestination) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CloudWatchDestination"} + + if s.DimensionConfigurations == nil { + invalidParams.Add(aws.NewErrParamRequired("DimensionConfigurations")) + } + if s.DimensionConfigurations != nil { + for i, v := range s.DimensionConfigurations { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DimensionConfigurations", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CloudWatchDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.DimensionConfigurations != nil { + v := s.DimensionConfigurations + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DimensionConfigurations", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + return nil +} + +// An object that defines the dimension configuration to use when you send email +// events to Amazon CloudWatch. +type CloudWatchDimensionConfiguration struct { + _ struct{} `type:"structure"` + + // The default value of the dimension that is published to Amazon CloudWatch + // if you don't provide the value of the dimension when you send an email. This + // value has to meet the following criteria: + // + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores + // (_), or dashes (-). + // + // * It can contain no more than 256 characters. + // + // DefaultDimensionValue is a required field + DefaultDimensionValue *string `type:"string" required:"true"` + + // The name of an Amazon CloudWatch dimension associated with an email sending + // metric. The name has to meet the following criteria: + // + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores + // (_), or dashes (-). + // + // * It can contain no more than 256 characters. + // + // DimensionName is a required field + DimensionName *string `type:"string" required:"true"` + + // The location where the Amazon SES API v2 finds the value of a dimension to + // publish to Amazon CloudWatch. If you want to use the message tags that you + // specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail + // or SendRawEmail API, choose messageTag. If you want to use your own email + // headers, choose emailHeader. 
If you want to use link tags, choose linkTags. + // + // DimensionValueSource is a required field + DimensionValueSource DimensionValueSource `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s CloudWatchDimensionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchDimensionConfiguration) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CloudWatchDimensionConfiguration"} + + if s.DefaultDimensionValue == nil { + invalidParams.Add(aws.NewErrParamRequired("DefaultDimensionValue")) + } + + if s.DimensionName == nil { + invalidParams.Add(aws.NewErrParamRequired("DimensionName")) + } + if len(s.DimensionValueSource) == 0 { + invalidParams.Add(aws.NewErrParamRequired("DimensionValueSource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s CloudWatchDimensionConfiguration) MarshalFields(e protocol.FieldEncoder) error { + if s.DefaultDimensionValue != nil { + v := *s.DefaultDimensionValue + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DefaultDimensionValue", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DimensionName != nil { + v := *s.DimensionName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DimensionName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.DimensionValueSource) > 0 { + v := s.DimensionValueSource + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DimensionValueSource", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// An object that represents the content of the email, and optionally a character +// set specification. +type Content struct { + _ struct{} `type:"structure"` + + // The character set for the content. Because of the constraints of the SMTP + // protocol, the Amazon SES API v2 uses 7-bit ASCII by default. If the text + // includes characters outside of the ASCII range, you have to specify a character + // set. For example, you could specify UTF-8, ISO-8859-1, or Shift_JIS. + Charset *string `type:"string"` + + // The content of the message itself. + // + // Data is a required field + Data *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Content) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Content) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Content"} + + if s.Data == nil { + invalidParams.Add(aws.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Content) MarshalFields(e protocol.FieldEncoder) error { + if s.Charset != nil { + v := *s.Charset + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Charset", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Data != nil { + v := *s.Data + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Data", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains information about the volume of email sent on each +// day of the analysis period. +type DailyVolume struct { + _ struct{} `type:"structure"` + + // An object that contains inbox placement metrics for a specified day in the + // analysis period, broken out by the recipient's email provider. + DomainIspPlacements []DomainIspPlacement `type:"list"` + + // The date that the DailyVolume metrics apply to, in Unix time. + StartDate *time.Time `type:"timestamp"` + + // An object that contains inbox placement metrics for a specific day in the + // analysis period. + VolumeStatistics *VolumeStatistics `type:"structure"` +} + +// String returns the string representation +func (s DailyVolume) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DailyVolume) MarshalFields(e protocol.FieldEncoder) error { + if s.DomainIspPlacements != nil { + v := s.DomainIspPlacements + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DomainIspPlacements", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.StartDate != nil { + v := *s.StartDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "StartDate", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.VolumeStatistics != nil { + v := s.VolumeStatistics + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VolumeStatistics", v, metadata) + } + return nil +} + +// Contains information about a dedicated IP address that is associated with +// your Amazon SES API v2 account. +// +// To learn more about requesting dedicated IP addresses, see Requesting and +// Relinquishing Dedicated IP Addresses (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/dedicated-ip-case.html) +// in the Amazon SES Developer Guide. +type DedicatedIp struct { + _ struct{} `type:"structure"` + + // An IPv4 address. + // + // Ip is a required field + Ip *string `type:"string" required:"true"` + + // The name of the dedicated IP pool that the IP address is associated with. + PoolName *string `type:"string"` + + // Indicates how complete the dedicated IP warm-up process is. When this value + // equals 1, the address has completed the warm-up process and is ready for + // use. + // + // WarmupPercentage is a required field + WarmupPercentage *int64 `type:"integer" required:"true"` + + // The warm-up status of a dedicated IP address. The status can have one of + // the following values: + // + // * IN_PROGRESS – The IP address isn't ready to use because the dedicated + // IP warm-up process is ongoing. + // + // * DONE – The dedicated IP warm-up process is complete, and the IP address + // is ready to use. 
+ // + // WarmupStatus is a required field + WarmupStatus WarmupStatus `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s DedicatedIp) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DedicatedIp) MarshalFields(e protocol.FieldEncoder) error { + if s.Ip != nil { + v := *s.Ip + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Ip", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PoolName != nil { + v := *s.PoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "PoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.WarmupPercentage != nil { + v := *s.WarmupPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "WarmupPercentage", protocol.Int64Value(v), metadata) + } + if len(s.WarmupStatus) > 0 { + v := s.WarmupStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "WarmupStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// An object that contains metadata related to a predictive inbox placement +// test. +type DeliverabilityTestReport struct { + _ struct{} `type:"structure"` + + // The date and time when the predictive inbox placement test was created, in + // Unix time format. + CreateDate *time.Time `type:"timestamp"` + + // The status of the predictive inbox placement test. If the status is IN_PROGRESS, + // then the predictive inbox placement test is currently running. Predictive + // inbox placement tests are usually complete within 24 hours of creating the + // test. If the status is COMPLETE, then the test is finished, and you can use + // the GetDeliverabilityTestReport to view the results of the test. + DeliverabilityTestStatus DeliverabilityTestStatus `type:"string" enum:"true"` + + // The sender address that you specified for the predictive inbox placement + // test. + FromEmailAddress *string `type:"string"` + + // A unique string that identifies the predictive inbox placement test. + ReportId *string `type:"string"` + + // A name that helps you identify a predictive inbox placement test report. + ReportName *string `type:"string"` + + // The subject line for an email that you submitted in a predictive inbox placement + // test. + Subject *string `type:"string"` +} + +// String returns the string representation +func (s DeliverabilityTestReport) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DeliverabilityTestReport) MarshalFields(e protocol.FieldEncoder) error { + if s.CreateDate != nil { + v := *s.CreateDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CreateDate", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if len(s.DeliverabilityTestStatus) > 0 { + v := s.DeliverabilityTestStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DeliverabilityTestStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.FromEmailAddress != nil { + v := *s.FromEmailAddress + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FromEmailAddress", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReportId != nil { + v := *s.ReportId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReportId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ReportName != nil { + v := *s.ReportName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReportName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Subject != nil { + v := *s.Subject + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Subject", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// Used to associate a configuration set with a dedicated IP pool. +type DeliveryOptions struct { + _ struct{} `type:"structure"` + + // The name of the dedicated IP pool that you want to associate with the configuration + // set. + SendingPoolName *string `type:"string"` + + // Specifies whether messages that use the configuration set are required to + // use Transport Layer Security (TLS). If the value is Require, messages are + // only delivered if a TLS connection can be established. If the value is Optional, + // messages can be delivered in plain text if a TLS connection can't be established. + TlsPolicy TlsPolicy `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DeliveryOptions) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DeliveryOptions) MarshalFields(e protocol.FieldEncoder) error { + if s.SendingPoolName != nil { + v := *s.SendingPoolName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingPoolName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.TlsPolicy) > 0 { + v := s.TlsPolicy + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TlsPolicy", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// An object that describes the recipients for an email. +type Destination struct { + _ struct{} `type:"structure"` + + // An array that contains the email addresses of the "BCC" (blind carbon copy) + // recipients for the email. + BccAddresses []string `type:"list"` + + // An array that contains the email addresses of the "CC" (carbon copy) recipients + // for the email. + CcAddresses []string `type:"list"` + + // An array that contains the email addresses of the "To" recipients for the + // email. + ToAddresses []string `type:"list"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s Destination) MarshalFields(e protocol.FieldEncoder) error { + if s.BccAddresses != nil { + v := s.BccAddresses + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "BccAddresses", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.CcAddresses != nil { + v := s.CcAddresses + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "CcAddresses", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.ToAddresses != nil { + v := s.ToAddresses + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "ToAddresses", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// An object that contains information about the DKIM configuration for an email +// identity. +type DkimAttributes struct { + _ struct{} `type:"structure"` + + // If the value is true, then the messages that you send from the identity are + // signed using DKIM. If the value is false, then the messages that you send + // from the identity aren't DKIM-signed. + SigningEnabled *bool `type:"boolean"` + + // Describes whether or not Amazon SES has successfully located the DKIM records + // in the DNS records for the domain. The status can be one of the following: + // + // * PENDING – Amazon SES hasn't yet detected the DKIM records in the DNS + // configuration for the domain, but will continue to attempt to locate them. + // + // * SUCCESS – Amazon SES located the DKIM records in the DNS configuration + // for the domain and determined that they're correct. You can now send DKIM-signed + // email from the identity. + // + // * FAILED – Amazon SES wasn't able to locate the DKIM records in the + // DNS settings for the domain, and won't continue to search for them. + // + // * TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon + // SES from determining the DKIM status for the domain. + // + // * NOT_STARTED – Amazon SES hasn't yet started searching for the DKIM + // records in the DKIM records for the domain. + Status DkimStatus `type:"string" enum:"true"` + + // A set of unique strings that you use to create a set of CNAME records that + // you add to the DNS configuration for your domain. When Amazon SES detects + // these records in the DNS configuration for your domain, the DKIM authentication + // process is complete. Amazon SES usually detects these records within about + // 72 hours of adding them to the DNS configuration for your domain. + Tokens []string `type:"list"` +} + +// String returns the string representation +func (s DkimAttributes) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
+func (s DkimAttributes) MarshalFields(e protocol.FieldEncoder) error { + if s.SigningEnabled != nil { + v := *s.SigningEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SigningEnabled", protocol.BoolValue(v), metadata) + } + if len(s.Status) > 0 { + v := s.Status + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Status", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.Tokens != nil { + v := s.Tokens + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Tokens", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// An object that contains the deliverability data for a specific campaign. +// This data is available for a campaign only if the campaign sent email by +// using a domain that the Deliverability dashboard is enabled for (PutDeliverabilityDashboardOption +// operation). +type DomainDeliverabilityCampaign struct { + _ struct{} `type:"structure"` + + // The unique identifier for the campaign. The Deliverability dashboard automatically + // generates and assigns this identifier to a campaign. + CampaignId *string `type:"string"` + + // The percentage of email messages that were deleted by recipients, without + // being opened first. Due to technical limitations, this value only includes + // recipients who opened the message by using an email client that supports + // images. + DeleteRate *float64 `type:"double"` + + // The major email providers who handled the email message. + Esps []string `type:"list"` + + // The first time, in Unix time format, when the email message was delivered + // to any recipient's inbox. This value can help you determine how long it took + // for a campaign to deliver an email message. + FirstSeenDateTime *time.Time `type:"timestamp"` + + // The verified email address that the email message was sent from. + FromAddress *string `type:"string"` + + // The URL of an image that contains a snapshot of the email message that was + // sent. + ImageUrl *string `type:"string"` + + // The number of email messages that were delivered to recipients’ inboxes. + InboxCount *int64 `type:"long"` + + // The last time, in Unix time format, when the email message was delivered + // to any recipient's inbox. This value can help you determine how long it took + // for a campaign to deliver an email message. + LastSeenDateTime *time.Time `type:"timestamp"` + + // The projected number of recipients that the email message was sent to. + ProjectedVolume *int64 `type:"long"` + + // The percentage of email messages that were opened and then deleted by recipients. + // Due to technical limitations, this value only includes recipients who opened + // the message by using an email client that supports images. + ReadDeleteRate *float64 `type:"double"` + + // The percentage of email messages that were opened by recipients. Due to technical + // limitations, this value only includes recipients who opened the message by + // using an email client that supports images. + ReadRate *float64 `type:"double"` + + // The IP addresses that were used to send the email message. + SendingIps []string `type:"list"` + + // The number of email messages that were delivered to recipients' spam or junk + // mail folders. + SpamCount *int64 `type:"long"` + + // The subject line, or title, of the email message. 
+ Subject *string `type:"string"` +} + +// String returns the string representation +func (s DomainDeliverabilityCampaign) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DomainDeliverabilityCampaign) MarshalFields(e protocol.FieldEncoder) error { + if s.CampaignId != nil { + v := *s.CampaignId + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CampaignId", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.DeleteRate != nil { + v := *s.DeleteRate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DeleteRate", protocol.Float64Value(v), metadata) + } + if s.Esps != nil { + v := s.Esps + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "Esps", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.FirstSeenDateTime != nil { + v := *s.FirstSeenDateTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FirstSeenDateTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.FromAddress != nil { + v := *s.FromAddress + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "FromAddress", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.ImageUrl != nil { + v := *s.ImageUrl + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ImageUrl", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.InboxCount != nil { + v := *s.InboxCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InboxCount", protocol.Int64Value(v), metadata) + } + if s.LastSeenDateTime != nil { + v := *s.LastSeenDateTime + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastSeenDateTime", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.ProjectedVolume != nil { + v := *s.ProjectedVolume + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProjectedVolume", protocol.Int64Value(v), metadata) + } + if s.ReadDeleteRate != nil { + v := *s.ReadDeleteRate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReadDeleteRate", protocol.Float64Value(v), metadata) + } + if s.ReadRate != nil { + v := *s.ReadRate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReadRate", protocol.Float64Value(v), metadata) + } + if s.SendingIps != nil { + v := s.SendingIps + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "SendingIps", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.SpamCount != nil { + v := *s.SpamCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpamCount", protocol.Int64Value(v), metadata) + } + if s.Subject != nil { + v := *s.Subject + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Subject", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains information about the Deliverability dashboard subscription +// for a verified domain that you use to send email and currently has an active +// Deliverability dashboard subscription. 
If a Deliverability dashboard subscription +// is active for a domain, you gain access to reputation, inbox placement, and +// other metrics for the domain. +type DomainDeliverabilityTrackingOption struct { + _ struct{} `type:"structure"` + + // A verified domain that’s associated with your AWS account and currently + // has an active Deliverability dashboard subscription. + Domain *string `type:"string"` + + // An object that contains information about the inbox placement data settings + // for the domain. + InboxPlacementTrackingOption *InboxPlacementTrackingOption `type:"structure"` + + // The date, in Unix time format, when you enabled the Deliverability dashboard + // for the domain. + SubscriptionStartDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s DomainDeliverabilityTrackingOption) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s DomainDeliverabilityTrackingOption) MarshalFields(e protocol.FieldEncoder) error { + if s.Domain != nil { + v := *s.Domain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Domain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.InboxPlacementTrackingOption != nil { + v := s.InboxPlacementTrackingOption + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "InboxPlacementTrackingOption", v, metadata) + } + if s.SubscriptionStartDate != nil { + v := *s.SubscriptionStartDate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SubscriptionStartDate", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + return nil +} + +// An object that contains inbox placement data for email sent from one of your +// email domains to a specific email provider. +type DomainIspPlacement struct { + _ struct{} `type:"structure"` + + // The percentage of messages that were sent from the selected domain to the + // specified email provider that arrived in recipients' inboxes. + InboxPercentage *float64 `type:"double"` + + // The total number of messages that were sent from the selected domain to the + // specified email provider that arrived in recipients' inboxes. + InboxRawCount *int64 `type:"long"` + + // The name of the email provider that the inbox placement data applies to. + IspName *string `type:"string"` + + // The percentage of messages that were sent from the selected domain to the + // specified email provider that arrived in recipients' spam or junk mail folders. + SpamPercentage *float64 `type:"double"` + + // The total number of messages that were sent from the selected domain to the + // specified email provider that arrived in recipients' spam or junk mail folders. + SpamRawCount *int64 `type:"long"` +} + +// String returns the string representation +func (s DomainIspPlacement) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
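Every numeric field on DomainIspPlacement is a pointer, so callers normally nil-check before doing arithmetic. An illustrative helper, using only the fields shown above, that derives an inbox rate from the raw counts:

// inboxRate is a hypothetical helper: it reports the fraction of delivered
// messages that reached the inbox, and false when either counter was omitted.
func inboxRate(p sesv2.DomainIspPlacement) (float64, bool) {
	if p.InboxRawCount == nil || p.SpamRawCount == nil {
		return 0, false
	}
	total := *p.InboxRawCount + *p.SpamRawCount
	if total == 0 {
		return 0, false
	}
	return float64(*p.InboxRawCount) / float64(total), true
}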
+func (s DomainIspPlacement) MarshalFields(e protocol.FieldEncoder) error { + if s.InboxPercentage != nil { + v := *s.InboxPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InboxPercentage", protocol.Float64Value(v), metadata) + } + if s.InboxRawCount != nil { + v := *s.InboxRawCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InboxRawCount", protocol.Int64Value(v), metadata) + } + if s.IspName != nil { + v := *s.IspName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IspName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.SpamPercentage != nil { + v := *s.SpamPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpamPercentage", protocol.Float64Value(v), metadata) + } + if s.SpamRawCount != nil { + v := *s.SpamRawCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpamRawCount", protocol.Int64Value(v), metadata) + } + return nil +} + +// An object that defines the entire content of the email, including the message +// headers and the body content. You can create a simple email message, in which +// you specify the subject and the text and HTML versions of the message body. +// You can also create raw messages, in which you specify a complete MIME-formatted +// message. Raw messages can include attachments and custom headers. +type EmailContent struct { + _ struct{} `type:"structure"` + + // The raw email message. The message has to meet the following criteria: + // + // * The message has to contain a header and a body, separated by one blank + // line. + // + // * All of the required header fields must be present in the message. + // + // * Each part of a multipart MIME message must be formatted properly. + // + // * If you include attachments, they must be in a file format that the Amazon + // SES API v2 supports. + // + // * The entire message must be Base64 encoded. + // + // * If any of the MIME parts in your message contain content that is outside + // of the 7-bit ASCII character range, you should encode that content to + // ensure that recipients' email clients render the message properly. + // + // * The length of any single line of text in the message can't exceed 1,000 + // characters. This restriction is defined in RFC 5321 (https://tools.ietf.org/html/rfc5321). + Raw *RawMessage `type:"structure"` + + // The simple email message. The message consists of a subject and a message + // body. + Simple *Message `type:"structure"` + + // The template to use for the email message. + Template *Template `type:"structure"` +} + +// String returns the string representation +func (s EmailContent) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EmailContent) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EmailContent"} + if s.Raw != nil { + if err := s.Raw.Validate(); err != nil { + invalidParams.AddNested("Raw", err.(aws.ErrInvalidParams)) + } + } + if s.Simple != nil { + if err := s.Simple.Validate(); err != nil { + invalidParams.AddNested("Simple", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
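A minimal sketch of assembling the simple variant of EmailContent follows. The Body and Content shapes are defined elsewhere in this file and not shown in this excerpt, so their Text and Data fields here are assumptions based on the SES v2 model; aws.String is the pointer helper from the aws package:

// Hypothetical helper: wraps a subject and plain-text body in an EmailContent.
// EmailContent.Validate (shown above) cascades into the nested Simple message,
// so a missing Subject or Body is caught before any request is marshaled.
func simpleContent(subject, text string) *sesv2.EmailContent {
	return &sesv2.EmailContent{
		Simple: &sesv2.Message{
			Subject: &sesv2.Content{Data: aws.String(subject)},
			Body: &sesv2.Body{
				Text: &sesv2.Content{Data: aws.String(text)},
			},
		},
	}
}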
+func (s EmailContent) MarshalFields(e protocol.FieldEncoder) error { + if s.Raw != nil { + v := s.Raw + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Raw", v, metadata) + } + if s.Simple != nil { + v := s.Simple + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Simple", v, metadata) + } + if s.Template != nil { + v := s.Template + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Template", v, metadata) + } + return nil +} + +// In the Amazon SES API v2, events include message sends, deliveries, opens, +// clicks, bounces, and complaints. Event destinations are places that you can +// send information about these events to. For example, you can send event data +// to Amazon SNS to receive notifications when you receive bounces or complaints, +// or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for +// long-term storage. +type EventDestination struct { + _ struct{} `type:"structure"` + + // An object that defines an Amazon CloudWatch destination for email events. + // You can use Amazon CloudWatch to monitor and gain insights on your email + // sending metrics. + CloudWatchDestination *CloudWatchDestination `type:"structure"` + + // If true, the event destination is enabled. When the event destination is + // enabled, the specified event types are sent to the destinations in this EventDestinationDefinition. + // + // If false, the event destination is disabled. When the event destination is + // disabled, events aren't sent to the specified destinations. + Enabled *bool `type:"boolean"` + + // An object that defines an Amazon Kinesis Data Firehose destination for email + // events. You can use Amazon Kinesis Data Firehose to stream data to other + // services, such as Amazon S3 and Amazon Redshift. + KinesisFirehoseDestination *KinesisFirehoseDestination `type:"structure"` + + // The types of events that Amazon SES sends to the specified event destinations. + // + // MatchingEventTypes is a required field + MatchingEventTypes []EventType `type:"list" required:"true"` + + // A name that identifies the event destination. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // An object that defines an Amazon Pinpoint project destination for email events. + // You can send email event data to a Amazon Pinpoint project to view metrics + // using the Transactional Messaging dashboards that are built in to Amazon + // Pinpoint. For more information, see Transactional Messaging Charts (https://docs.aws.amazon.com/pinpoint/latest/userguide/analytics-transactional-messages.html) + // in the Amazon Pinpoint User Guide. + PinpointDestination *PinpointDestination `type:"structure"` + + // An object that defines an Amazon SNS destination for email events. You can + // use Amazon SNS to send notification when certain email events occur. + SnsDestination *SnsDestination `type:"structure"` +} + +// String returns the string representation +func (s EventDestination) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
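EventDestination is primarily an output shape (for example, from the GetConfigurationSetEventDestinations operation listed further down). A hypothetical summary helper that relies only on the fields shown above plus the standard-library fmt and strings packages:

// describeEventDestination reports which targets are wired up and which event
// types they receive.
func describeEventDestination(d sesv2.EventDestination) string {
	var targets []string
	if d.CloudWatchDestination != nil {
		targets = append(targets, "cloudwatch")
	}
	if d.KinesisFirehoseDestination != nil {
		targets = append(targets, "firehose")
	}
	if d.PinpointDestination != nil {
		targets = append(targets, "pinpoint")
	}
	if d.SnsDestination != nil {
		targets = append(targets, "sns")
	}
	types := make([]string, 0, len(d.MatchingEventTypes))
	for _, t := range d.MatchingEventTypes {
		types = append(types, string(t)) // EventType is a string-backed enum
	}
	name := ""
	if d.Name != nil {
		name = *d.Name
	}
	return fmt.Sprintf("%s -> [%s] for events [%s]",
		name, strings.Join(targets, ", "), strings.Join(types, ", "))
}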
+func (s EventDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.CloudWatchDestination != nil { + v := s.CloudWatchDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CloudWatchDestination", v, metadata) + } + if s.Enabled != nil { + v := *s.Enabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Enabled", protocol.BoolValue(v), metadata) + } + if s.KinesisFirehoseDestination != nil { + v := s.KinesisFirehoseDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "KinesisFirehoseDestination", v, metadata) + } + if s.MatchingEventTypes != nil { + v := s.MatchingEventTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "MatchingEventTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PinpointDestination != nil { + v := s.PinpointDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "PinpointDestination", v, metadata) + } + if s.SnsDestination != nil { + v := s.SnsDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SnsDestination", v, metadata) + } + return nil +} + +// An object that defines the event destination. Specifically, it defines which +// services receive events from emails sent using the configuration set that +// the event destination is associated with. Also defines the types of events +// that are sent to the event destination. +type EventDestinationDefinition struct { + _ struct{} `type:"structure"` + + // An object that defines an Amazon CloudWatch destination for email events. + // You can use Amazon CloudWatch to monitor and gain insights on your email + // sending metrics. + CloudWatchDestination *CloudWatchDestination `type:"structure"` + + // If true, the event destination is enabled. When the event destination is + // enabled, the specified event types are sent to the destinations in this EventDestinationDefinition. + // + // If false, the event destination is disabled. When the event destination is + // disabled, events aren't sent to the specified destinations. + Enabled *bool `type:"boolean"` + + // An object that defines an Amazon Kinesis Data Firehose destination for email + // events. You can use Amazon Kinesis Data Firehose to stream data to other + // services, such as Amazon S3 and Amazon Redshift. + KinesisFirehoseDestination *KinesisFirehoseDestination `type:"structure"` + + // An array that specifies which events the Amazon SES API v2 should send to + // the destinations in this EventDestinationDefinition. + MatchingEventTypes []EventType `type:"list"` + + // An object that defines an Amazon Pinpoint project destination for email events. + // You can send email event data to a Amazon Pinpoint project to view metrics + // using the Transactional Messaging dashboards that are built in to Amazon + // Pinpoint. For more information, see Transactional Messaging Charts (https://docs.aws.amazon.com/pinpoint/latest/userguide/analytics-transactional-messages.html) + // in the Amazon Pinpoint User Guide. + PinpointDestination *PinpointDestination `type:"structure"` + + // An object that defines an Amazon SNS destination for email events. 
You can + // use Amazon SNS to send notification when certain email events occur. + SnsDestination *SnsDestination `type:"structure"` +} + +// String returns the string representation +func (s EventDestinationDefinition) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EventDestinationDefinition) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EventDestinationDefinition"} + if s.CloudWatchDestination != nil { + if err := s.CloudWatchDestination.Validate(); err != nil { + invalidParams.AddNested("CloudWatchDestination", err.(aws.ErrInvalidParams)) + } + } + if s.KinesisFirehoseDestination != nil { + if err := s.KinesisFirehoseDestination.Validate(); err != nil { + invalidParams.AddNested("KinesisFirehoseDestination", err.(aws.ErrInvalidParams)) + } + } + if s.SnsDestination != nil { + if err := s.SnsDestination.Validate(); err != nil { + invalidParams.AddNested("SnsDestination", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s EventDestinationDefinition) MarshalFields(e protocol.FieldEncoder) error { + if s.CloudWatchDestination != nil { + v := s.CloudWatchDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "CloudWatchDestination", v, metadata) + } + if s.Enabled != nil { + v := *s.Enabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Enabled", protocol.BoolValue(v), metadata) + } + if s.KinesisFirehoseDestination != nil { + v := s.KinesisFirehoseDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "KinesisFirehoseDestination", v, metadata) + } + if s.MatchingEventTypes != nil { + v := s.MatchingEventTypes + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "MatchingEventTypes", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + if s.PinpointDestination != nil { + v := s.PinpointDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "PinpointDestination", v, metadata) + } + if s.SnsDestination != nil { + v := s.SnsDestination + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "SnsDestination", v, metadata) + } + return nil +} + +// Information about an email identity. +type IdentityInfo struct { + _ struct{} `type:"structure"` + + // The address or domain of the identity. + IdentityName *string `type:"string"` + + // The email identity type. The identity type can be one of the following: + // + // * EMAIL_ADDRESS – The identity is an email address. + // + // * DOMAIN – The identity is a domain. + // + // * MANAGED_DOMAIN – The identity is a domain that is managed by AWS. + IdentityType IdentityType `type:"string" enum:"true"` + + // Indicates whether or not you can send email from the identity. + // + // An identity is an email address or domain that you send email from. Before + // you can send email from an identity, you have to demostrate that you own + // the identity, and that you authorize Amazon SES to send email from that identity. 
+ SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s IdentityInfo) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s IdentityInfo) MarshalFields(e protocol.FieldEncoder) error { + if s.IdentityName != nil { + v := *s.IdentityName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IdentityName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.IdentityType) > 0 { + v := s.IdentityType + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IdentityType", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.SendingEnabled != nil { + v := *s.SendingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +// An object that contains information about the inbox placement data settings +// for a verified domain that’s associated with your AWS account. This data +// is available only if you enabled the Deliverability dashboard for the domain. +type InboxPlacementTrackingOption struct { + _ struct{} `type:"structure"` + + // Specifies whether inbox placement data is being tracked for the domain. + Global *bool `type:"boolean"` + + // An array of strings, one for each major email provider that the inbox placement + // data applies to. + TrackedIsps []string `type:"list"` +} + +// String returns the string representation +func (s InboxPlacementTrackingOption) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s InboxPlacementTrackingOption) MarshalFields(e protocol.FieldEncoder) error { + if s.Global != nil { + v := *s.Global + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Global", protocol.BoolValue(v), metadata) + } + if s.TrackedIsps != nil { + v := s.TrackedIsps + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "TrackedIsps", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddValue(protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v1)}) + } + ls0.End() + + } + return nil +} + +// An object that describes how email sent during the predictive inbox placement +// test was handled by a certain email provider. +type IspPlacement struct { + _ struct{} `type:"structure"` + + // The name of the email provider that the inbox placement data applies to. + IspName *string `type:"string"` + + // An object that contains inbox placement metrics for a specific email provider. + PlacementStatistics *PlacementStatistics `type:"structure"` +} + +// String returns the string representation +func (s IspPlacement) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s IspPlacement) MarshalFields(e protocol.FieldEncoder) error { + if s.IspName != nil { + v := *s.IspName + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IspName", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.PlacementStatistics != nil { + v := s.PlacementStatistics + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "PlacementStatistics", v, metadata) + } + return nil +} + +// An object that defines an Amazon Kinesis Data Firehose destination for email +// events. 
You can use Amazon Kinesis Data Firehose to stream data to other +// services, such as Amazon S3 and Amazon Redshift. +type KinesisFirehoseDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream + // that the Amazon SES API v2 sends email events to. + // + // DeliveryStreamArn is a required field + DeliveryStreamArn *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that the Amazon SES API v2 + // uses to send email events to the Amazon Kinesis Data Firehose stream. + // + // IamRoleArn is a required field + IamRoleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s KinesisFirehoseDestination) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KinesisFirehoseDestination) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "KinesisFirehoseDestination"} + + if s.DeliveryStreamArn == nil { + invalidParams.Add(aws.NewErrParamRequired("DeliveryStreamArn")) + } + + if s.IamRoleArn == nil { + invalidParams.Add(aws.NewErrParamRequired("IamRoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s KinesisFirehoseDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.DeliveryStreamArn != nil { + v := *s.DeliveryStreamArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DeliveryStreamArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.IamRoleArn != nil { + v := *s.IamRoleArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "IamRoleArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// A list of attributes that are associated with a MAIL FROM domain. +type MailFromAttributes struct { + _ struct{} `type:"structure"` + + // The action that you want to take if the required MX record can't be found + // when you send an email. When you set this value to UseDefaultValue, the mail + // is sent using amazonses.com as the MAIL FROM domain. When you set this value + // to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified + // error, and doesn't attempt to deliver the email. + // + // These behaviors are taken when the custom MAIL FROM domain configuration + // is in the Pending, Failed, and TemporaryFailure states. + // + // BehaviorOnMxFailure is a required field + BehaviorOnMxFailure BehaviorOnMxFailure `type:"string" required:"true" enum:"true"` + + // The name of a domain that an email identity uses as a custom MAIL FROM domain. + // + // MailFromDomain is a required field + MailFromDomain *string `type:"string" required:"true"` + + // The status of the MAIL FROM domain. This status can have the following values: + // + // * PENDING – Amazon SES hasn't started searching for the MX record yet. + // + // * SUCCESS – Amazon SES detected the required MX record for the MAIL + // FROM domain. + // + // * FAILED – Amazon SES can't find the required MX record, or the record + // no longer exists. + // + // * TEMPORARY_FAILURE – A temporary issue occurred, which prevented Amazon + // SES from determining the status of the MAIL FROM domain. 
+ // + // MailFromDomainStatus is a required field + MailFromDomainStatus MailFromDomainStatus `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s MailFromAttributes) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MailFromAttributes) MarshalFields(e protocol.FieldEncoder) error { + if len(s.BehaviorOnMxFailure) > 0 { + v := s.BehaviorOnMxFailure + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "BehaviorOnMxFailure", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + if s.MailFromDomain != nil { + v := *s.MailFromDomain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MailFromDomain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if len(s.MailFromDomainStatus) > 0 { + v := s.MailFromDomainStatus + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MailFromDomainStatus", protocol.QuotedValue{ValueMarshaler: v}, metadata) + } + return nil +} + +// Represents the email message that you're sending. The Message object consists +// of a subject line and a message body. +type Message struct { + _ struct{} `type:"structure"` + + // The body of the message. You can specify an HTML version of the message, + // a text-only version of the message, or both. + // + // Body is a required field + Body *Body `type:"structure" required:"true"` + + // The subject line of the email. The subject line can only contain 7-bit ASCII + // characters. However, you can specify non-ASCII characters in the subject + // line by using encoded-word syntax, as described in RFC 2047 (https://tools.ietf.org/html/rfc2047). + // + // Subject is a required field + Subject *Content `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Message) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Message"} + + if s.Body == nil { + invalidParams.Add(aws.NewErrParamRequired("Body")) + } + + if s.Subject == nil { + invalidParams.Add(aws.NewErrParamRequired("Subject")) + } + if s.Body != nil { + if err := s.Body.Validate(); err != nil { + invalidParams.AddNested("Body", err.(aws.ErrInvalidParams)) + } + } + if s.Subject != nil { + if err := s.Subject.Validate(); err != nil { + invalidParams.AddNested("Subject", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Message) MarshalFields(e protocol.FieldEncoder) error { + if s.Body != nil { + v := s.Body + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Body", v, metadata) + } + if s.Subject != nil { + v := s.Subject + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "Subject", v, metadata) + } + return nil +} + +// Contains the name and value of a tag that you apply to an email. You can +// use message tags when you publish email sending events. +type MessageTag struct { + _ struct{} `type:"structure"` + + // The name of the message tag. The message tag name has to meet the following + // criteria: + // + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores + // (_), or dashes (-). 
+ // + // * It can contain no more than 256 characters. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The value of the message tag. The message tag value has to meet the following + // criteria: + // + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores + // (_), or dashes (-). + // + // * It can contain no more than 256 characters. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageTag) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MessageTag) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "MessageTag"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + + if s.Value == nil { + invalidParams.Add(aws.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s MessageTag) MarshalFields(e protocol.FieldEncoder) error { + if s.Name != nil { + v := *s.Name + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Value", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains information about email that was sent from the selected +// domain. +type OverallVolume struct { + _ struct{} `type:"structure"` + + // An object that contains inbox and junk mail placement metrics for individual + // email providers. + DomainIspPlacements []DomainIspPlacement `type:"list"` + + // The percentage of emails that were sent from the domain that were read by + // their recipients. + ReadRatePercent *float64 `type:"double"` + + // An object that contains information about the numbers of messages that arrived + // in recipients' inboxes and junk mail folders. + VolumeStatistics *VolumeStatistics `type:"structure"` +} + +// String returns the string representation +func (s OverallVolume) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s OverallVolume) MarshalFields(e protocol.FieldEncoder) error { + if s.DomainIspPlacements != nil { + v := s.DomainIspPlacements + + metadata := protocol.Metadata{} + ls0 := e.List(protocol.BodyTarget, "DomainIspPlacements", metadata) + ls0.Start() + for _, v1 := range v { + ls0.ListAddFields(v1) + } + ls0.End() + + } + if s.ReadRatePercent != nil { + v := *s.ReadRatePercent + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReadRatePercent", protocol.Float64Value(v), metadata) + } + if s.VolumeStatistics != nil { + v := s.VolumeStatistics + + metadata := protocol.Metadata{} + e.SetFields(protocol.BodyTarget, "VolumeStatistics", v, metadata) + } + return nil +} + +// An object that defines an Amazon Pinpoint project destination for email events. +// You can send email event data to a Amazon Pinpoint project to view metrics +// using the Transactional Messaging dashboards that are built in to Amazon +// Pinpoint. 
For more information, see Transactional Messaging Charts (https://docs.aws.amazon.com/pinpoint/latest/userguide/analytics-transactional-messages.html) +// in the Amazon Pinpoint User Guide. +type PinpointDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want + // to send email events to. + ApplicationArn *string `type:"string"` +} + +// String returns the string representation +func (s PinpointDestination) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PinpointDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.ApplicationArn != nil { + v := *s.ApplicationArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ApplicationArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains inbox placement data for an email provider. +type PlacementStatistics struct { + _ struct{} `type:"structure"` + + // The percentage of emails that were authenticated by using DomainKeys Identified + // Mail (DKIM) during the predictive inbox placement test. + DkimPercentage *float64 `type:"double"` + + // The percentage of emails that arrived in recipients' inboxes during the predictive + // inbox placement test. + InboxPercentage *float64 `type:"double"` + + // The percentage of emails that didn't arrive in recipients' inboxes at all + // during the predictive inbox placement test. + MissingPercentage *float64 `type:"double"` + + // The percentage of emails that arrived in recipients' spam or junk mail folders + // during the predictive inbox placement test. + SpamPercentage *float64 `type:"double"` + + // The percentage of emails that were authenticated by using Sender Policy Framework + // (SPF) during the predictive inbox placement test. + SpfPercentage *float64 `type:"double"` +} + +// String returns the string representation +func (s PlacementStatistics) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s PlacementStatistics) MarshalFields(e protocol.FieldEncoder) error { + if s.DkimPercentage != nil { + v := *s.DkimPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "DkimPercentage", protocol.Float64Value(v), metadata) + } + if s.InboxPercentage != nil { + v := *s.InboxPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InboxPercentage", protocol.Float64Value(v), metadata) + } + if s.MissingPercentage != nil { + v := *s.MissingPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MissingPercentage", protocol.Float64Value(v), metadata) + } + if s.SpamPercentage != nil { + v := *s.SpamPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpamPercentage", protocol.Float64Value(v), metadata) + } + if s.SpfPercentage != nil { + v := *s.SpfPercentage + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpfPercentage", protocol.Float64Value(v), metadata) + } + return nil +} + +// The raw email message. +type RawMessage struct { + _ struct{} `type:"structure"` + + // The raw email message. The message has to meet the following criteria: + // + // * The message has to contain a header and a body, separated by one blank + // line. + // + // * All of the required header fields must be present in the message. 
+ // + // * Each part of a multipart MIME message must be formatted properly. + // + // * Attachments must be in a file format that the Amazon SES API v2 supports. + // + // * The entire message must be Base64 encoded. + // + // * If any of the MIME parts in your message contain content that is outside + // of the 7-bit ASCII character range, you should encode that content to + // ensure that recipients' email clients render the message properly. + // + // * The length of any single line of text in the message can't exceed 1,000 + // characters. This restriction is defined in RFC 5321 (https://tools.ietf.org/html/rfc5321). + // + // Data is automatically base64 encoded/decoded by the SDK. + // + // Data is a required field + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s RawMessage) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RawMessage) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RawMessage"} + + if s.Data == nil { + invalidParams.Add(aws.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s RawMessage) MarshalFields(e protocol.FieldEncoder) error { + if s.Data != nil { + v := s.Data + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Data", protocol.QuotedValue{ValueMarshaler: protocol.BytesValue(v)}, metadata) + } + return nil +} + +// Enable or disable collection of reputation metrics for emails that you send +// using this configuration set in the current AWS Region. +type ReputationOptions struct { + _ struct{} `type:"structure"` + + // The date and time (in Unix time) when the reputation metrics were last given + // a fresh start. When your account is given a fresh start, your reputation + // metrics are calculated starting from the date of the fresh start. + LastFreshStart *time.Time `type:"timestamp"` + + // If true, tracking of reputation metrics is enabled for the configuration + // set. If false, tracking of reputation metrics is disabled for the configuration + // set. + ReputationMetricsEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ReputationOptions) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s ReputationOptions) MarshalFields(e protocol.FieldEncoder) error { + if s.LastFreshStart != nil { + v := *s.LastFreshStart + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "LastFreshStart", + protocol.TimeValue{V: v, Format: protocol.UnixTimeFormatName, QuotedFormatTime: true}, metadata) + } + if s.ReputationMetricsEnabled != nil { + v := *s.ReputationMetricsEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ReputationMetricsEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +// An object that contains information about the per-day and per-second sending +// limits for your Amazon SES account in the current AWS Region. +type SendQuota struct { + _ struct{} `type:"structure"` + + // The maximum number of emails that you can send in the current AWS Region + // over a 24-hour period. This value is also called your sending quota. 
+ Max24HourSend *float64 `type:"double"` + + // The maximum number of emails that you can send per second in the current + // AWS Region. This value is also called your maximum sending rate or your maximum + // TPS (transactions per second) rate. + MaxSendRate *float64 `type:"double"` + + // The number of emails sent from your Amazon SES account in the current AWS + // Region over the past 24 hours. + SentLast24Hours *float64 `type:"double"` +} + +// String returns the string representation +func (s SendQuota) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SendQuota) MarshalFields(e protocol.FieldEncoder) error { + if s.Max24HourSend != nil { + v := *s.Max24HourSend + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Max24HourSend", protocol.Float64Value(v), metadata) + } + if s.MaxSendRate != nil { + v := *s.MaxSendRate + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "MaxSendRate", protocol.Float64Value(v), metadata) + } + if s.SentLast24Hours != nil { + v := *s.SentLast24Hours + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SentLast24Hours", protocol.Float64Value(v), metadata) + } + return nil +} + +// Used to enable or disable email sending for messages that use this configuration +// set in the current AWS Region. +type SendingOptions struct { + _ struct{} `type:"structure"` + + // If true, email sending is enabled for the configuration set. If false, email + // sending is disabled for the configuration set. + SendingEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s SendingOptions) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s SendingOptions) MarshalFields(e protocol.FieldEncoder) error { + if s.SendingEnabled != nil { + v := *s.SendingEnabled + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SendingEnabled", protocol.BoolValue(v), metadata) + } + return nil +} + +// An object that defines an Amazon SNS destination for email events. You can +// use Amazon SNS to send notification when certain email events occur. +type SnsDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish + // email events to. For more information about Amazon SNS topics, see the Amazon + // SNS Developer Guide (https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + // + // TopicArn is a required field + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SnsDestination) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SnsDestination) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SnsDestination"} + + if s.TopicArn == nil { + invalidParams.Add(aws.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
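Combining SnsDestination with the EventDestinationDefinition shape defined earlier in this file, a sketch of routing bounce and complaint events to an SNS topic might look like this. The literal event-type strings are assumptions about the EventType wire values; real code would normally use the generated EventType constants instead. aws.Bool and aws.String are the pointer helpers from the aws package:

// snsEventDestination is a hypothetical helper that prepares an event
// destination definition and runs the Validate methods shown above, which
// report a missing TopicArn before any request is made.
func snsEventDestination(topicARN string) (sesv2.EventDestinationDefinition, error) {
	def := sesv2.EventDestinationDefinition{
		Enabled: aws.Bool(true),
		MatchingEventTypes: []sesv2.EventType{
			sesv2.EventType("BOUNCE"),    // assumed wire value
			sesv2.EventType("COMPLAINT"), // assumed wire value
		},
		SnsDestination: &sesv2.SnsDestination{TopicArn: aws.String(topicARN)},
	}
	return def, def.Validate()
}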
+func (s SnsDestination) MarshalFields(e protocol.FieldEncoder) error { + if s.TopicArn != nil { + v := *s.TopicArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TopicArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that defines the tags that are associated with a resource. A tag +// is a label that you optionally define and associate with a resource. Tags +// can help you categorize and manage resources in different ways, such as by +// purpose, owner, environment, or other criteria. A resource can have as many +// as 50 tags. +// +// Each tag consists of a required tag key and an associated tag value, both +// of which you define. A tag key is a general label that acts as a category +// for a more specific tag value. A tag value acts as a descriptor within a +// tag key. A tag key can contain as many as 128 characters. A tag value can +// contain as many as 256 characters. The characters can be Unicode letters, +// digits, white space, or one of the following symbols: _ . : / = + -. The +// following additional restrictions apply to tags: +// +// * Tag keys and values are case sensitive. +// +// * For each associated resource, each tag key must be unique and it can +// have only one value. +// +// * The aws: prefix is reserved for use by AWS; you can’t use it in any +// tag keys or values that you define. In addition, you can't edit or remove +// tag keys or values that use this prefix. Tags that use this prefix don’t +// count against the limit of 50 tags per resource. +// +// * You can associate tags with public or shared resources, but the tags +// are available only for your AWS account, not any other accounts that share +// the resource. In addition, the tags are available only for resources that +// are located in the specified AWS Region for your AWS account. +type Tag struct { + _ struct{} `type:"structure"` + + // One part of a key-value pair that defines a tag. The maximum length of a + // tag key is 128 characters. The minimum length is 1 character. + // + // Key is a required field + Key *string `type:"string" required:"true"` + + // The optional part of a key-value pair that defines a tag. The maximum length + // of a tag value is 256 characters. The minimum length is 0 characters. If + // you don't want a resource to have a specific tag value, don't specify a value + // for this parameter. If you don't specify a value, Amazon SES sets the value + // to an empty string. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Tag"} + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + + if s.Value == nil { + invalidParams.Add(aws.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
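A hypothetical convenience for the tagging operations listed later in this diff (TagResource, UntagResource): build the []Tag slice from a plain map, with aws.String assumed to be the usual pointer helper from the aws package:

// toTags maps keys and values directly onto the required Tag.Key/Tag.Value fields.
func toTags(m map[string]string) []sesv2.Tag {
	tags := make([]sesv2.Tag, 0, len(m))
	for k, v := range m {
		tags = append(tags, sesv2.Tag{Key: aws.String(k), Value: aws.String(v)})
	}
	return tags
}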
+func (s Tag) MarshalFields(e protocol.FieldEncoder) error { + if s.Key != nil { + v := *s.Key + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Key", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.Value != nil { + v := *s.Value + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "Value", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that defines the email template to use for an email message, and +// the values to use for any message variables in that template. An email template +// is a type of message template that contains content that you want to define, +// save, and reuse in email messages that you send. +type Template struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the template. + TemplateArn *string `type:"string"` + + // An object that defines the values to use for message variables in the template. + // This object is a set of key-value pairs. Each key defines a message variable + // in the template. The corresponding value defines the value to use for that + // variable. + TemplateData *string `type:"string"` +} + +// String returns the string representation +func (s Template) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s Template) MarshalFields(e protocol.FieldEncoder) error { + if s.TemplateArn != nil { + v := *s.TemplateArn + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + if s.TemplateData != nil { + v := *s.TemplateData + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "TemplateData", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that defines the tracking options for a configuration set. When +// you use the Amazon SES API v2 to send an email, it contains an invisible +// image that's used to track when recipients open your email. If your email +// contains links, those links are changed slightly in order to track when recipients +// click them. +// +// These images and links include references to a domain operated by AWS. You +// can optionally configure the Amazon SES to use a domain that you operate +// for these images and links. +type TrackingOptions struct { + _ struct{} `type:"structure"` + + // The domain that you want to use for tracking open and click events. + // + // CustomRedirectDomain is a required field + CustomRedirectDomain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s TrackingOptions) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TrackingOptions) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "TrackingOptions"} + + if s.CustomRedirectDomain == nil { + invalidParams.Add(aws.NewErrParamRequired("CustomRedirectDomain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. 
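TemplateData is documented above as a set of key-value pairs carried in a single string, so callers generally JSON-encode their variables into it. A sketch under that assumption, using encoding/json from the standard library and the aws.String pointer helper:

// templateWith is a hypothetical helper: it serializes the variables into
// Template.TemplateData and records the template ARN.
func templateWith(arn string, vars map[string]string) (*sesv2.Template, error) {
	data, err := json.Marshal(vars)
	if err != nil {
		return nil, err
	}
	return &sesv2.Template{
		TemplateArn:  aws.String(arn),
		TemplateData: aws.String(string(data)),
	}, nil
}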
+func (s TrackingOptions) MarshalFields(e protocol.FieldEncoder) error { + if s.CustomRedirectDomain != nil { + v := *s.CustomRedirectDomain + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "CustomRedirectDomain", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata) + } + return nil +} + +// An object that contains information about the amount of email that was delivered +// to recipients. +type VolumeStatistics struct { + _ struct{} `type:"structure"` + + // The total number of emails that arrived in recipients' inboxes. + InboxRawCount *int64 `type:"long"` + + // An estimate of the percentage of emails sent from the current domain that + // will arrive in recipients' inboxes. + ProjectedInbox *int64 `type:"long"` + + // An estimate of the percentage of emails sent from the current domain that + // will arrive in recipients' spam or junk mail folders. + ProjectedSpam *int64 `type:"long"` + + // The total number of emails that arrived in recipients' spam or junk mail + // folders. + SpamRawCount *int64 `type:"long"` +} + +// String returns the string representation +func (s VolumeStatistics) String() string { + return awsutil.Prettify(s) +} + +// MarshalFields encodes the AWS API shape using the passed in protocol encoder. +func (s VolumeStatistics) MarshalFields(e protocol.FieldEncoder) error { + if s.InboxRawCount != nil { + v := *s.InboxRawCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "InboxRawCount", protocol.Int64Value(v), metadata) + } + if s.ProjectedInbox != nil { + v := *s.ProjectedInbox + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProjectedInbox", protocol.Int64Value(v), metadata) + } + if s.ProjectedSpam != nil { + v := *s.ProjectedSpam + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "ProjectedSpam", protocol.Int64Value(v), metadata) + } + if s.SpamRawCount != nil { + v := *s.SpamRawCount + + metadata := protocol.Metadata{} + e.SetValue(protocol.BodyTarget, "SpamRawCount", protocol.Int64Value(v), metadata) + } + return nil +} diff --git a/service/sesv2/sesv2iface/interface.go b/service/sesv2/sesv2iface/interface.go new file mode 100644 index 00000000000..d9c84444e9e --- /dev/null +++ b/service/sesv2/sesv2iface/interface.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sesv2iface provides an interface to enable mocking the Amazon Simple Email Service service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package sesv2iface + +import ( + "github.com/aws/aws-sdk-go-v2/service/sesv2" +) + +// ClientAPI provides an interface to enable mocking the +// sesv2.Client methods. This make unit testing your code that +// calls out to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // Amazon SES V2. 
+// func myFunc(svc sesv2iface.ClientAPI) bool { +// // Make svc.CreateConfigurationSet request +// } +// +// func main() { +// cfg, err := external.LoadDefaultAWSConfig() +// if err != nil { +// panic("failed to load config, " + err.Error()) +// } +// +// svc := sesv2.New(cfg) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockClientClient struct { +// sesv2iface.ClientPI +// } +// func (m *mockClientClient) CreateConfigurationSet(input *sesv2.CreateConfigurationSetInput) (*sesv2.CreateConfigurationSetOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockClientClient{} +// +// myfunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. Its suggested to use the pattern above for testing, or using +// tooling to generate mocks to satisfy the interfaces. +type ClientAPI interface { + CreateConfigurationSetRequest(*sesv2.CreateConfigurationSetInput) sesv2.CreateConfigurationSetRequest + + CreateConfigurationSetEventDestinationRequest(*sesv2.CreateConfigurationSetEventDestinationInput) sesv2.CreateConfigurationSetEventDestinationRequest + + CreateDedicatedIpPoolRequest(*sesv2.CreateDedicatedIpPoolInput) sesv2.CreateDedicatedIpPoolRequest + + CreateDeliverabilityTestReportRequest(*sesv2.CreateDeliverabilityTestReportInput) sesv2.CreateDeliverabilityTestReportRequest + + CreateEmailIdentityRequest(*sesv2.CreateEmailIdentityInput) sesv2.CreateEmailIdentityRequest + + DeleteConfigurationSetRequest(*sesv2.DeleteConfigurationSetInput) sesv2.DeleteConfigurationSetRequest + + DeleteConfigurationSetEventDestinationRequest(*sesv2.DeleteConfigurationSetEventDestinationInput) sesv2.DeleteConfigurationSetEventDestinationRequest + + DeleteDedicatedIpPoolRequest(*sesv2.DeleteDedicatedIpPoolInput) sesv2.DeleteDedicatedIpPoolRequest + + DeleteEmailIdentityRequest(*sesv2.DeleteEmailIdentityInput) sesv2.DeleteEmailIdentityRequest + + GetAccountRequest(*sesv2.GetAccountInput) sesv2.GetAccountRequest + + GetBlacklistReportsRequest(*sesv2.GetBlacklistReportsInput) sesv2.GetBlacklistReportsRequest + + GetConfigurationSetRequest(*sesv2.GetConfigurationSetInput) sesv2.GetConfigurationSetRequest + + GetConfigurationSetEventDestinationsRequest(*sesv2.GetConfigurationSetEventDestinationsInput) sesv2.GetConfigurationSetEventDestinationsRequest + + GetDedicatedIpRequest(*sesv2.GetDedicatedIpInput) sesv2.GetDedicatedIpRequest + + GetDedicatedIpsRequest(*sesv2.GetDedicatedIpsInput) sesv2.GetDedicatedIpsRequest + + GetDeliverabilityDashboardOptionsRequest(*sesv2.GetDeliverabilityDashboardOptionsInput) sesv2.GetDeliverabilityDashboardOptionsRequest + + GetDeliverabilityTestReportRequest(*sesv2.GetDeliverabilityTestReportInput) sesv2.GetDeliverabilityTestReportRequest + + GetDomainDeliverabilityCampaignRequest(*sesv2.GetDomainDeliverabilityCampaignInput) sesv2.GetDomainDeliverabilityCampaignRequest + + GetDomainStatisticsReportRequest(*sesv2.GetDomainStatisticsReportInput) sesv2.GetDomainStatisticsReportRequest + + GetEmailIdentityRequest(*sesv2.GetEmailIdentityInput) sesv2.GetEmailIdentityRequest + + ListConfigurationSetsRequest(*sesv2.ListConfigurationSetsInput) sesv2.ListConfigurationSetsRequest + + ListDedicatedIpPoolsRequest(*sesv2.ListDedicatedIpPoolsInput) 
sesv2.ListDedicatedIpPoolsRequest + + ListDeliverabilityTestReportsRequest(*sesv2.ListDeliverabilityTestReportsInput) sesv2.ListDeliverabilityTestReportsRequest + + ListDomainDeliverabilityCampaignsRequest(*sesv2.ListDomainDeliverabilityCampaignsInput) sesv2.ListDomainDeliverabilityCampaignsRequest + + ListEmailIdentitiesRequest(*sesv2.ListEmailIdentitiesInput) sesv2.ListEmailIdentitiesRequest + + ListTagsForResourceRequest(*sesv2.ListTagsForResourceInput) sesv2.ListTagsForResourceRequest + + PutAccountDedicatedIpWarmupAttributesRequest(*sesv2.PutAccountDedicatedIpWarmupAttributesInput) sesv2.PutAccountDedicatedIpWarmupAttributesRequest + + PutAccountSendingAttributesRequest(*sesv2.PutAccountSendingAttributesInput) sesv2.PutAccountSendingAttributesRequest + + PutConfigurationSetDeliveryOptionsRequest(*sesv2.PutConfigurationSetDeliveryOptionsInput) sesv2.PutConfigurationSetDeliveryOptionsRequest + + PutConfigurationSetReputationOptionsRequest(*sesv2.PutConfigurationSetReputationOptionsInput) sesv2.PutConfigurationSetReputationOptionsRequest + + PutConfigurationSetSendingOptionsRequest(*sesv2.PutConfigurationSetSendingOptionsInput) sesv2.PutConfigurationSetSendingOptionsRequest + + PutConfigurationSetTrackingOptionsRequest(*sesv2.PutConfigurationSetTrackingOptionsInput) sesv2.PutConfigurationSetTrackingOptionsRequest + + PutDedicatedIpInPoolRequest(*sesv2.PutDedicatedIpInPoolInput) sesv2.PutDedicatedIpInPoolRequest + + PutDedicatedIpWarmupAttributesRequest(*sesv2.PutDedicatedIpWarmupAttributesInput) sesv2.PutDedicatedIpWarmupAttributesRequest + + PutDeliverabilityDashboardOptionRequest(*sesv2.PutDeliverabilityDashboardOptionInput) sesv2.PutDeliverabilityDashboardOptionRequest + + PutEmailIdentityDkimAttributesRequest(*sesv2.PutEmailIdentityDkimAttributesInput) sesv2.PutEmailIdentityDkimAttributesRequest + + PutEmailIdentityFeedbackAttributesRequest(*sesv2.PutEmailIdentityFeedbackAttributesInput) sesv2.PutEmailIdentityFeedbackAttributesRequest + + PutEmailIdentityMailFromAttributesRequest(*sesv2.PutEmailIdentityMailFromAttributesInput) sesv2.PutEmailIdentityMailFromAttributesRequest + + SendEmailRequest(*sesv2.SendEmailInput) sesv2.SendEmailRequest + + TagResourceRequest(*sesv2.TagResourceInput) sesv2.TagResourceRequest + + UntagResourceRequest(*sesv2.UntagResourceInput) sesv2.UntagResourceRequest + + UpdateConfigurationSetEventDestinationRequest(*sesv2.UpdateConfigurationSetEventDestinationInput) sesv2.UpdateConfigurationSetEventDestinationRequest +} + +var _ ClientAPI = (*sesv2.Client)(nil) diff --git a/service/ssm/api_enums.go b/service/ssm/api_enums.go index c7a147775ba..31d3456d246 100644 --- a/service/ssm/api_enums.go +++ b/service/ssm/api_enums.go @@ -137,6 +137,7 @@ type AttachmentsSourceKey string // Enum values for AttachmentsSourceKey const ( AttachmentsSourceKeySourceUrl AttachmentsSourceKey = "SourceUrl" + AttachmentsSourceKeyS3fileUrl AttachmentsSourceKey = "S3FileUrl" ) func (enum AttachmentsSourceKey) MarshalValue() (string, error) { @@ -858,6 +859,8 @@ const ( OpsItemFilterKeyOperationalDataValue OpsItemFilterKey = "OperationalDataValue" OpsItemFilterKeyResourceId OpsItemFilterKey = "ResourceId" OpsItemFilterKeyAutomationId OpsItemFilterKey = "AutomationId" + OpsItemFilterKeyCategory OpsItemFilterKey = "Category" + OpsItemFilterKeySeverity OpsItemFilterKey = "Severity" ) func (enum OpsItemFilterKey) MarshalValue() (string, error) { diff --git a/service/ssm/api_op_CreateOpsItem.go b/service/ssm/api_op_CreateOpsItem.go index 3687d0c50b2..05169d00148 100644 --- 
a/service/ssm/api_op_CreateOpsItem.go +++ b/service/ssm/api_op_CreateOpsItem.go @@ -13,6 +13,9 @@ import ( type CreateOpsItemInput struct { _ struct{} `type:"structure"` + // Specify a category to assign to an OpsItem. + Category *string `min:"1" type:"string"` + // Information about the OpsItem. // // Description is a required field @@ -53,6 +56,9 @@ type CreateOpsItemInput struct { // impacted resources, or statuses for the impacted resource. RelatedOpsItems []RelatedOpsItem `type:"list"` + // Specify a severity to assign to an OpsItem. + Severity *string `min:"1" type:"string"` + // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. // // Source is a required field @@ -85,6 +91,9 @@ func (s CreateOpsItemInput) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateOpsItemInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CreateOpsItemInput"} + if s.Category != nil && len(*s.Category) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Category", 1)) + } if s.Description == nil { invalidParams.Add(aws.NewErrParamRequired("Description")) @@ -95,6 +104,9 @@ func (s *CreateOpsItemInput) Validate() error { if s.Priority != nil && *s.Priority < 1 { invalidParams.Add(aws.NewErrParamMinValue("Priority", 1)) } + if s.Severity != nil && len(*s.Severity) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Severity", 1)) + } if s.Source == nil { invalidParams.Add(aws.NewErrParamRequired("Source")) diff --git a/service/ssm/api_op_CreateResourceDataSync.go b/service/ssm/api_op_CreateResourceDataSync.go index 9ea8eeaae4a..91f30d05892 100644 --- a/service/ssm/api_op_CreateResourceDataSync.go +++ b/service/ssm/api_op_CreateResourceDataSync.go @@ -13,14 +13,21 @@ type CreateResourceDataSyncInput struct { _ struct{} `type:"structure"` // Amazon S3 configuration details for the sync. - // - // S3Destination is a required field - S3Destination *ResourceDataSyncS3Destination `type:"structure" required:"true"` + S3Destination *ResourceDataSyncS3Destination `type:"structure"` // A name for the configuration. // // SyncName is a required field SyncName *string `min:"1" type:"string" required:"true"` + + // Specify information about the data sources to synchronize. + SyncSource *ResourceDataSyncSource `type:"structure"` + + // Specify SyncToDestination to create a resource data sync that synchronizes + // data from multiple AWS Regions to an Amazon S3 bucket. Specify SyncFromSource + // to synchronize data from multiple AWS accounts and Regions, as listed in + // AWS Organizations. 
+ SyncType *string `min:"1" type:"string"` } // String returns the string representation @@ -32,21 +39,25 @@ func (s CreateResourceDataSyncInput) String() string { func (s *CreateResourceDataSyncInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "CreateResourceDataSyncInput"} - if s.S3Destination == nil { - invalidParams.Add(aws.NewErrParamRequired("S3Destination")) - } - if s.SyncName == nil { invalidParams.Add(aws.NewErrParamRequired("SyncName")) } if s.SyncName != nil && len(*s.SyncName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("SyncName", 1)) } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SyncType", 1)) + } if s.S3Destination != nil { if err := s.S3Destination.Validate(); err != nil { invalidParams.AddNested("S3Destination", err.(aws.ErrInvalidParams)) } } + if s.SyncSource != nil { + if err := s.SyncSource.Validate(); err != nil { + invalidParams.AddNested("SyncSource", err.(aws.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -68,17 +79,33 @@ const opCreateResourceDataSync = "CreateResourceDataSync" // CreateResourceDataSyncRequest returns a request value for making API operation for // Amazon Simple Systems Manager (SSM). // -// Creates a resource data sync configuration to a single bucket in Amazon S3. -// This is an asynchronous operation that returns immediately. After a successful -// initial sync is completed, the system continuously syncs data to the Amazon -// S3 bucket. To check the status of the sync, use the ListResourceDataSync. +// A resource data sync helps you view data from multiple sources in a single +// location. Systems Manager offers two types of resource data sync: SyncToDestination +// and SyncFromSource. +// +// You can configure Systems Manager Inventory to use the SyncToDestination +// type to synchronize Inventory data from multiple AWS Regions to a single +// Amazon S3 bucket. For more information, see Configuring Resource Data Sync +// for Inventory (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html) +// in the AWS Systems Manager User Guide. +// +// You can configure Systems Manager Explorer to use the SyncToDestination type +// to synchronize operational work items (OpsItems) and operational data (OpsData) +// from multiple AWS Regions to a single Amazon S3 bucket. You can also configure +// Explorer to use the SyncFromSource type. This type synchronizes OpsItems +// and OpsData from multiple AWS accounts and Regions by using AWS Organizations. +// For more information, see Setting Up Explorer to Display Data from Multiple +// Accounts and Regions (http://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html) +// in the AWS Systems Manager User Guide. +// +// A resource data sync is an asynchronous operation that returns immediately. +// After a successful initial sync is completed, the system continuously syncs +// data. To check the status of a sync, use the ListResourceDataSync. // // By default, data is not encrypted in Amazon S3. We strongly recommend that // you enable encryption in Amazon S3 to ensure secure data storage. We also // recommend that you secure access to the Amazon S3 bucket by creating a restrictive -// bucket policy. For more information, see Configuring Resource Data Sync for -// Inventory (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html) -// in the AWS Systems Manager User Guide. +// bucket policy. 
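The new SyncType and SyncSource fields enable the SyncFromSource variant described above. A minimal sketch using the request/response pattern the SDK's own examples follow; the sync name and Regions are illustrative, and the singleAccountMultiRegions source type and the ResourceDataSyncSource shape come from the type documentation further down in this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ssm.New(cfg)

	// SyncFromSource aggregates OpsItems and OpsData from the listed Regions;
	// SyncToDestination (with S3Destination) is the other documented mode.
	req := svc.CreateResourceDataSyncRequest(&ssm.CreateResourceDataSyncInput{
		SyncName: aws.String("ExplorerSync"), // illustrative name
		SyncType: aws.String("SyncFromSource"),
		SyncSource: &ssm.ResourceDataSyncSource{
			SourceType:           aws.String("singleAccountMultiRegions"),
			SourceRegions:        []string{"us-east-1", "us-west-2"},
			IncludeFutureRegions: aws.Bool(true),
		},
	})
	if _, err := req.Send(context.TODO()); err != nil {
		panic(err)
	}
	fmt.Println("resource data sync created")
}
```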
// // // Example sending a request using CreateResourceDataSyncRequest. // req := client.CreateResourceDataSyncRequest(params) diff --git a/service/ssm/api_op_DeleteResourceDataSync.go b/service/ssm/api_op_DeleteResourceDataSync.go index 46f41612776..b8f9f8f5942 100644 --- a/service/ssm/api_op_DeleteResourceDataSync.go +++ b/service/ssm/api_op_DeleteResourceDataSync.go @@ -16,6 +16,9 @@ type DeleteResourceDataSyncInput struct { // // SyncName is a required field SyncName *string `min:"1" type:"string" required:"true"` + + // Specify the type of resource data sync to delete. + SyncType *string `min:"1" type:"string"` } // String returns the string representation @@ -33,6 +36,9 @@ func (s *DeleteResourceDataSyncInput) Validate() error { if s.SyncName != nil && len(*s.SyncName) < 1 { invalidParams.Add(aws.NewErrParamMinLen("SyncName", 1)) } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SyncType", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -55,9 +61,8 @@ const opDeleteResourceDataSync = "DeleteResourceDataSync" // Amazon Simple Systems Manager (SSM). // // Deletes a Resource Data Sync configuration. After the configuration is deleted, -// changes to inventory data on managed instances are no longer synced with -// the target Amazon S3 bucket. Deleting a sync configuration does not delete -// data in the target Amazon S3 bucket. +// changes to data on managed instances are no longer synced to or from the +// target. Deleting a sync configuration does not delete data. // // // Example sending a request using DeleteResourceDataSyncRequest. // req := client.DeleteResourceDataSyncRequest(params) diff --git a/service/ssm/api_op_DescribeParameters.go b/service/ssm/api_op_DescribeParameters.go index 4637470cd4b..dd8279a01b2 100644 --- a/service/ssm/api_op_DescribeParameters.go +++ b/service/ssm/api_op_DescribeParameters.go @@ -13,7 +13,7 @@ import ( type DescribeParametersInput struct { _ struct{} `type:"structure"` - // One or more filters. Use a filter to return a more specific list of results. + // This data type is deprecated. Instead, use ParameterFilters. Filters []ParametersFilter `type:"list"` // The maximum number of items to return for this call. The call also returns diff --git a/service/ssm/api_op_GetOpsSummary.go b/service/ssm/api_op_GetOpsSummary.go index 7d587c8a2bc..c7a9e15b11c 100644 --- a/service/ssm/api_op_GetOpsSummary.go +++ b/service/ssm/api_op_GetOpsSummary.go @@ -15,9 +15,7 @@ type GetOpsSummaryInput struct { // Optional aggregators that return counts of OpsItems based on one or more // expressions. - // - // Aggregators is a required field - Aggregators []OpsAggregator `min:"1" type:"list" required:"true"` + Aggregators []OpsAggregator `min:"1" type:"list"` // Optional filters used to scope down the returned OpsItems. Filters []OpsFilter `min:"1" type:"list"` @@ -29,6 +27,12 @@ type GetOpsSummaryInput struct { // A token to start the list. Use this token to get the next set of results. NextToken *string `type:"string"` + + // The OpsItem data type to return. + ResultAttributes []OpsResultAttribute `min:"1" type:"list"` + + // Specify the name of a resource data sync to get. + SyncName *string `min:"1" type:"string"` } // String returns the string representation @@ -39,10 +43,6 @@ func (s GetOpsSummaryInput) String() string { // Validate inspects the fields of the type to determine if they are valid. 
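With Aggregators now optional, GetOpsSummary can instead return raw OpsItem data collected by a named resource data sync through the new ResultAttributes and SyncName fields. A sketch of that call; the sync name is illustrative, AWS:OpsItem is one of the documented type names, and the OpsResultAttribute type is defined later in this change:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ssm.New(cfg)

	// Ask for OpsItem summaries collected by an existing resource data sync.
	req := svc.GetOpsSummaryRequest(&ssm.GetOpsSummaryInput{
		SyncName: aws.String("ExplorerSync"), // illustrative sync name
		ResultAttributes: []ssm.OpsResultAttribute{
			{TypeName: aws.String("AWS:OpsItem")},
		},
		MaxResults: aws.Int64(10),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	fmt.Println(resp)
}
```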
func (s *GetOpsSummaryInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "GetOpsSummaryInput"} - - if s.Aggregators == nil { - invalidParams.Add(aws.NewErrParamRequired("Aggregators")) - } if s.Aggregators != nil && len(s.Aggregators) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Aggregators", 1)) } @@ -52,6 +52,12 @@ func (s *GetOpsSummaryInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) } + if s.ResultAttributes != nil && len(s.ResultAttributes) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResultAttributes", 1)) + } + if s.SyncName != nil && len(*s.SyncName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SyncName", 1)) + } if s.Aggregators != nil { for i, v := range s.Aggregators { if err := v.Validate(); err != nil { @@ -66,6 +72,13 @@ func (s *GetOpsSummaryInput) Validate() error { } } } + if s.ResultAttributes != nil { + for i, v := range s.ResultAttributes { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ResultAttributes", i), err.(aws.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/ssm/api_op_GetParametersByPath.go b/service/ssm/api_op_GetParametersByPath.go index 3bc8726a598..a2190ae4b5b 100644 --- a/service/ssm/api_op_GetParametersByPath.go +++ b/service/ssm/api_op_GetParametersByPath.go @@ -22,8 +22,6 @@ type GetParametersByPathInput struct { NextToken *string `type:"string"` // Filters to limit the request results. - // - // You can't filter using the parameter name. ParameterFilters []ParameterStringFilter `type:"list"` // The hierarchy for the parameter. Hierarchies start with a forward slash (/) @@ -99,9 +97,7 @@ const opGetParametersByPath = "GetParametersByPath" // GetParametersByPathRequest returns a request value for making API operation for // Amazon Simple Systems Manager (SSM). // -// Retrieve parameters in a specific hierarchy. For more information, see Working -// with Systems Manager Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-working.html) -// in the AWS Systems Manager User Guide. +// Retrieve information about one or more parameters in a specific hierarchy. // // Request results are returned on a best-effort basis. If you specify MaxResults // in the request, the response includes information up to the limit specified. @@ -111,8 +107,6 @@ const opGetParametersByPath = "GetParametersByPath" // that point and a NextToken. You can specify the NextToken in a subsequent // call to get the next set of results. // -// This API action doesn't support filtering by tags. -// // // Example sending a request using GetParametersByPathRequest. // req := client.GetParametersByPathRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/ssm/api_op_ListCommandInvocations.go b/service/ssm/api_op_ListCommandInvocations.go index f56f0b9d9dc..2c70783207e 100644 --- a/service/ssm/api_op_ListCommandInvocations.go +++ b/service/ssm/api_op_ListCommandInvocations.go @@ -21,7 +21,7 @@ type ListCommandInvocationsInput struct { Details *bool `type:"boolean"` // (Optional) One or more filters. Use a filter to return a more specific list - // of results. + // of results. Note that the DocumentName filter is not supported for ListCommandInvocations. Filters []CommandFilter `min:"1" type:"list"` // (Optional) The command execution details for a specific instance ID. 
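The GetParametersByPath documentation above no longer rules out filtering, so a typical call pairs a hierarchy path with ParameterFilters. A sketch under illustrative values; the Key/Option/Values fields follow the ParameterStringFilter shape documented later in this change, and the /app/prod path and SecureString filter are assumptions, not required values:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ssm.New(cfg)

	// Walk an illustrative /app/prod hierarchy and keep only SecureString
	// parameters, decrypting them in the response.
	req := svc.GetParametersByPathRequest(&ssm.GetParametersByPathInput{
		Path:      aws.String("/app/prod"),
		Recursive: aws.Bool(true),
		ParameterFilters: []ssm.ParameterStringFilter{
			{
				Key:    aws.String("Type"),
				Option: aws.String("Equals"),
				Values: []string{"SecureString"},
			},
		},
		WithDecryption: aws.Bool(true),
	})
	resp, err := req.Send(context.TODO())
	if err != nil {
		panic(err)
	}
	for _, p := range resp.Parameters {
		fmt.Println(*p.Name)
	}
}
```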
diff --git a/service/ssm/api_op_ListResourceDataSync.go b/service/ssm/api_op_ListResourceDataSync.go index 3559f513e7a..2901d2d81fd 100644 --- a/service/ssm/api_op_ListResourceDataSync.go +++ b/service/ssm/api_op_ListResourceDataSync.go @@ -19,6 +19,12 @@ type ListResourceDataSyncInput struct { // A token to start the list. Use this token to get the next set of results. NextToken *string `type:"string"` + + // View a list of resource data syncs according to the sync type. Specify SyncToDestination + // to view resource data syncs that synchronize data to an Amazon S3 buckets. + // Specify SyncFromSource to view resource data syncs from AWS Organizations + // or from multiple AWS Regions. + SyncType *string `min:"1" type:"string"` } // String returns the string representation @@ -32,6 +38,9 @@ func (s *ListResourceDataSyncInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(aws.NewErrParamMinValue("MaxResults", 1)) } + if s.SyncType != nil && len(*s.SyncType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SyncType", 1)) + } if invalidParams.Len() > 0 { return invalidParams diff --git a/service/ssm/api_op_PutInventory.go b/service/ssm/api_op_PutInventory.go index c5350bd92b4..13fe7a8d36d 100644 --- a/service/ssm/api_op_PutInventory.go +++ b/service/ssm/api_op_PutInventory.go @@ -13,7 +13,7 @@ import ( type PutInventoryInput struct { _ struct{} `type:"structure"` - // One or more instance IDs where you want to add or update inventory items. + // An instance ID where you want to add or update inventory items. // // InstanceId is a required field InstanceId *string `type:"string" required:"true"` diff --git a/service/ssm/api_op_UpdateOpsItem.go b/service/ssm/api_op_UpdateOpsItem.go index 6c18b388519..5f48f7645f4 100644 --- a/service/ssm/api_op_UpdateOpsItem.go +++ b/service/ssm/api_op_UpdateOpsItem.go @@ -13,6 +13,9 @@ import ( type UpdateOpsItemInput struct { _ struct{} `type:"structure"` + // Specify a new category for an OpsItem. + Category *string `min:"1" type:"string"` + // Update the information about the OpsItem. Provide enough information so that // users reading this OpsItem for the first time understand the issue. Description *string `min:"1" type:"string"` @@ -63,6 +66,9 @@ type UpdateOpsItemInput struct { // impacted resources, or statuses for the impacted resource. RelatedOpsItems []RelatedOpsItem `type:"list"` + // Specify a new severity for an OpsItem. + Severity *string `min:"1" type:"string"` + // The OpsItem status. Status can be Open, In Progress, or Resolved. For more // information, see Editing OpsItem Details (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) // in the AWS Systems Manager User Guide. @@ -81,6 +87,9 @@ func (s UpdateOpsItemInput) String() string { // Validate inspects the fields of the type to determine if they are valid. 
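UpdateOpsItem gains the same Category and Severity fields that CreateOpsItem received earlier in this change. A minimal sketch of reclassifying an existing OpsItem; the OpsItem ID is a placeholder, Category uses one of the values documented for the OpsItem type (Availability, Cost, Performance, Recovery, Security), and Severity is one of "1" through "4":

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		panic("failed to load config, " + err.Error())
	}
	svc := ssm.New(cfg)

	// Reclassify an existing OpsItem; OpsItemId identifies which one.
	req := svc.UpdateOpsItemRequest(&ssm.UpdateOpsItemInput{
		OpsItemId: aws.String("oi-1234567890ab"), // placeholder OpsItem ID
		Category:  aws.String("Availability"),
		Severity:  aws.String("2"),
	})
	if _, err := req.Send(context.TODO()); err != nil {
		panic(err)
	}
	fmt.Println("OpsItem updated")
}
```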
func (s *UpdateOpsItemInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "UpdateOpsItemInput"} + if s.Category != nil && len(*s.Category) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Category", 1)) + } if s.Description != nil && len(*s.Description) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Description", 1)) } @@ -91,6 +100,9 @@ func (s *UpdateOpsItemInput) Validate() error { if s.Priority != nil && *s.Priority < 1 { invalidParams.Add(aws.NewErrParamMinValue("Priority", 1)) } + if s.Severity != nil && len(*s.Severity) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Severity", 1)) + } if s.Title != nil && len(*s.Title) < 1 { invalidParams.Add(aws.NewErrParamMinLen("Title", 1)) } diff --git a/service/ssm/api_types.go b/service/ssm/api_types.go index b2fd22ca714..ebb0a72e261 100644 --- a/service/ssm/api_types.go +++ b/service/ssm/api_types.go @@ -574,16 +574,21 @@ func (s AttachmentInformation) String() string { return awsutil.Prettify(s) } -// A key and value pair that identifies the location of an attachment to a document. +// Identifying information about a document attachment, including the file name +// and a key-value pair that identifies the location of an attachment to a document. type AttachmentsSource struct { _ struct{} `type:"structure"` - // The key of a key and value pair that identifies the location of an attachment + // The key of a key-value pair that identifies the location of an attachment // to a document. Key AttachmentsSourceKey `type:"string" enum:"true"` - // The URL of the location of a document attachment, such as the URL of an Amazon - // S3 bucket. + // The name of the document attachment file. + Name *string `type:"string"` + + // The value of a key-value pair that identifies the location of an attachment + // to a document. The format is the URL of the location of a document attachment, + // such as the URL of an Amazon S3 bucket. Values []string `min:"1" type:"list"` } @@ -3637,6 +3642,9 @@ func (s OpsEntity) String() string { type OpsEntityItem struct { _ struct{} `type:"structure"` + // The time OpsItem data was captured. + CaptureTime *string `type:"string"` + // The detailed data content for an OpsItem summaries result item. Content []map[string]string `type:"list"` } @@ -3701,6 +3709,10 @@ func (s *OpsFilter) Validate() error { type OpsItem struct { _ struct{} `type:"structure"` + // An OpsItem category. Category options include: Availability, Cost, Performance, + // Recovery, Security. + Category *string `min:"1" type:"string"` + // The ARN of the AWS account that created the OpsItem. CreatedBy *string `type:"string"` @@ -3754,6 +3766,9 @@ type OpsItem struct { // impacted resources, or statuses for the impacted resource. RelatedOpsItems []RelatedOpsItem `type:"list"` + // The severity of the OpsItem. Severity options range from 1 to 4. + Severity *string `min:"1" type:"string"` + // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. The // impacted resource is a subset of source. Source *string `min:"1" type:"string"` @@ -3857,6 +3872,9 @@ func (s OpsItemNotification) String() string { type OpsItemSummary struct { _ struct{} `type:"structure"` + // A list of OpsItems by category. + Category *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the IAM entity that created the OpsItem. CreatedBy *string `type:"string"` @@ -3879,6 +3897,9 @@ type OpsItemSummary struct { // The importance of this OpsItem in relation to other OpsItems in the system. 
Priority *int64 `min:"1" type:"integer"` + // A list of OpsItems by severity. + Severity *string `min:"1" type:"string"` + // The impacted AWS resource. Source *string `min:"1" type:"string"` @@ -3895,6 +3916,39 @@ func (s OpsItemSummary) String() string { return awsutil.Prettify(s) } +// The OpsItem data type to return. +type OpsResultAttribute struct { + _ struct{} `type:"structure"` + + // Name of the data type. Valid value: AWS:OpsItem, AWS:EC2InstanceInformation, + // AWS:OpsItemTrendline, or AWS:ComplianceSummary. + // + // TypeName is a required field + TypeName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s OpsResultAttribute) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OpsResultAttribute) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "OpsResultAttribute"} + + if s.TypeName == nil { + invalidParams.Add(aws.NewErrParamRequired("TypeName")) + } + if s.TypeName != nil && len(*s.TypeName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("TypeName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Information about the source where the association execution details are // stored. type OutputSource struct { @@ -4075,9 +4129,19 @@ func (s ParameterMetadata) String() string { // One or more filters. Use a filter to return a more specific list of results. // -// The Name and Tier filter keys can't be used with the GetParametersByPath -// API action. Also, the Label filter key can't be used with the DescribeParameters -// API action. +// The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath +// API actions. However, not all of the pattern values listed for Key can be +// used with both actions. +// +// For DescribeActions, all of the listed patterns are valid, with the exception +// of Label. +// +// For GetParametersByPath, the following patterns listed for Key are not valid: +// Name, Path, and Tier. +// +// For examples of CLI commands demonstrating valid parameter filter constructions, +// see Searching for Systems Manager Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-search.html) +// in the AWS Systems Manager User Guide. type ParameterStringFilter struct { _ struct{} `type:"structure"` @@ -4086,8 +4150,14 @@ type ParameterStringFilter struct { // Key is a required field Key *string `min:"1" type:"string" required:"true"` - // Valid options are Equals and BeginsWith. For Path filter, valid options are - // Recursive and OneLevel. + // For all filters used with DescribeParameters, valid options include Equals + // and BeginsWith. The Name filter additionally supports the Contains option. + // (Exception: For filters using the key Path, valid options include Recursive + // and OneLevel.) + // + // For filters used with GetParametersByPath, valid options include Equals and + // BeginsWith. (Exception: For filters using the key Label, the only valid option + // is Equals.) Option *string `min:"1" type:"string"` // The value you want to search for. @@ -4716,6 +4786,56 @@ func (s ResourceComplianceSummaryItem) String() string { return awsutil.Prettify(s) } +// Information about the AwsOrganizationsSource resource data sync source. A +// sync source of this type can synchronize data from AWS Organizations or, +// if an AWS Organization is not present, from multiple AWS Regions. 
+type ResourceDataSyncAwsOrganizationsSource struct { + _ struct{} `type:"structure"` + + // If an AWS Organization is present, this is either OrganizationalUnits or + // EntireOrganization. For OrganizationalUnits, the data is aggregated from + // a set of organization units. For EntireOrganization, the data is aggregated + // from the entire AWS Organization. + // + // OrganizationSourceType is a required field + OrganizationSourceType *string `min:"1" type:"string" required:"true"` + + // The AWS Organizations organization units included in the sync. + OrganizationalUnits []ResourceDataSyncOrganizationalUnit `min:"1" type:"list"` +} + +// String returns the string representation +func (s ResourceDataSyncAwsOrganizationsSource) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncAwsOrganizationsSource) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResourceDataSyncAwsOrganizationsSource"} + + if s.OrganizationSourceType == nil { + invalidParams.Add(aws.NewErrParamRequired("OrganizationSourceType")) + } + if s.OrganizationSourceType != nil && len(*s.OrganizationSourceType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationSourceType", 1)) + } + if s.OrganizationalUnits != nil && len(s.OrganizationalUnits) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationalUnits", 1)) + } + if s.OrganizationalUnits != nil { + for i, v := range s.OrganizationalUnits { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "OrganizationalUnits", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Information about a Resource Data Sync configuration, including its current // status and last successful sync. type ResourceDataSyncItem struct { @@ -4739,8 +4859,20 @@ type ResourceDataSyncItem struct { // The date and time the configuration was created (UTC). SyncCreatedTime *time.Time `type:"timestamp"` + // The date and time the resource data sync was changed. + SyncLastModifiedTime *time.Time `type:"timestamp"` + // The name of the Resource Data Sync. SyncName *string `min:"1" type:"string"` + + // Information about the source where the data was synchronized. + SyncSource *ResourceDataSyncSourceWithState `type:"structure"` + + // The type of resource data sync. If SyncType is SyncToDestination, then the + // resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType + // is SyncFromSource then the resource data sync synchronizes data from AWS + // Organizations or from multiple AWS Regions. + SyncType *string `min:"1" type:"string"` } // String returns the string representation @@ -4748,6 +4880,32 @@ func (s ResourceDataSyncItem) String() string { return awsutil.Prettify(s) } +// The AWS Organizations organizational unit data source for the sync. +type ResourceDataSyncOrganizationalUnit struct { + _ struct{} `type:"structure"` + + // The AWS Organization unit ID data source for the sync. + OrganizationalUnitId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncOrganizationalUnit) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
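A short sketch of how the organization-backed source fits together: build a ResourceDataSyncAwsOrganizationsSource scoped to organizational units and run the client-side Validate shown above. The OU IDs are placeholders; in a real call this value would be set as the AwsOrganizationsSource field of the ResourceDataSyncSource defined just below:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	// Aggregate data from two organizational units; the OU IDs are placeholders.
	src := ssm.ResourceDataSyncAwsOrganizationsSource{
		OrganizationSourceType: aws.String("OrganizationalUnits"),
		OrganizationalUnits: []ssm.ResourceDataSyncOrganizationalUnit{
			{OrganizationalUnitId: aws.String("ou-ab12-11111111")},
			{OrganizationalUnitId: aws.String("ou-ab12-22222222")},
		},
	}

	// Client-side validation mirrors the generated Validate methods above.
	if err := src.Validate(); err != nil {
		fmt.Println("invalid source:", err)
		return
	}
	fmt.Println("source is valid")
}
```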
+func (s *ResourceDataSyncOrganizationalUnit) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResourceDataSyncOrganizationalUnit"} + if s.OrganizationalUnitId != nil && len(*s.OrganizationalUnitId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OrganizationalUnitId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Information about the target Amazon S3 bucket for the Resource Data Sync. type ResourceDataSyncS3Destination struct { _ struct{} `type:"structure"` @@ -4813,6 +4971,115 @@ func (s *ResourceDataSyncS3Destination) Validate() error { return nil } +// Information about the source of the data included in the resource data sync. +type ResourceDataSyncSource struct { + _ struct{} `type:"structure"` + + // The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource + // type. + AwsOrganizationsSource *ResourceDataSyncAwsOrganizationsSource `type:"structure"` + + // Whether to automatically synchronize and aggregate data from new AWS Regions + // when those Regions come online. + IncludeFutureRegions *bool `type:"boolean"` + + // The SyncSource AWS Regions included in the resource data sync. + // + // SourceRegions is a required field + SourceRegions []string `type:"list" required:"true"` + + // The type of data source for the resource data sync. SourceType is either + // AwsOrganizations (if an organization is present in AWS Organizations) or + // singleAccountMultiRegions. + // + // SourceType is a required field + SourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResourceDataSyncSource) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncSource) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResourceDataSyncSource"} + + if s.SourceRegions == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceRegions")) + } + + if s.SourceType == nil { + invalidParams.Add(aws.NewErrParamRequired("SourceType")) + } + if s.SourceType != nil && len(*s.SourceType) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SourceType", 1)) + } + if s.AwsOrganizationsSource != nil { + if err := s.AwsOrganizationsSource.Validate(); err != nil { + invalidParams.AddNested("AwsOrganizationsSource", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The data type name for including resource data sync state. There are four +// sync states: +// +// OrganizationNotExists (Your organization doesn't exist) +// +// NoPermissions (The system can't locate the service-linked role. This role +// is automatically created when a user creates a resource data sync in Explorer.) +// +// InvalidOrganizationalUnit (You specified or selected an invalid unit in the +// resource data sync configuration.) +// +// TrustedAccessDisabled (You disabled Systems Manager access in the organization +// in AWS Organizations.) +type ResourceDataSyncSourceWithState struct { + _ struct{} `type:"structure"` + + // The field name in SyncSource for the ResourceDataSyncAwsOrganizationsSource + // type. + AwsOrganizationsSource *ResourceDataSyncAwsOrganizationsSource `type:"structure"` + + // Whether to automatically synchronize and aggregate data from new AWS Regions + // when those Regions come online. + IncludeFutureRegions *bool `type:"boolean"` + + // The SyncSource AWS Regions included in the resource data sync. 
+ SourceRegions []string `type:"list"` + + // The type of data source for the resource data sync. SourceType is either + // AwsOrganizations (if an organization is present in AWS Organizations) or + // singleAccountMultiRegions. + SourceType *string `min:"1" type:"string"` + + // The data type name for including resource data sync state. There are four + // sync states: + // + // OrganizationNotExists: Your organization doesn't exist. + // + // NoPermissions: The system can't locate the service-linked role. This role + // is automatically created when a user creates a resource data sync in Explorer. + // + // InvalidOrganizationalUnit: You specified or selected an invalid unit in the + // resource data sync configuration. + // + // TrustedAccessDisabled: You disabled Systems Manager access in the organization + // in AWS Organizations. + State *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncSourceWithState) String() string { + return awsutil.Prettify(s) +} + // The inventory item result attribute. type ResultAttribute struct { _ struct{} `type:"structure"` @@ -5325,8 +5592,16 @@ func (s *Tag) Validate() error { // * Key=tag-key,Values=Name,Instance-Type,CostCenter // // * (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup +// This example demonstrates how to target all resources in the resource +// group ProductionResourceGroup in your maintenance window. // // * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC +// This example demonstrates how to target only Amazon EC2 instances and +// VPCs in your maintenance window. +// +// * (State Manager association targets only) Key=InstanceIds,Values=* This +// example demonstrates how to target all managed instances in the AWS Region +// where the association was created. 
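As a quick illustration of the target syntax documented in the surrounding comment, here is how the resource-group and State Manager forms look when expressed with the SDK's Target shape (Key plus Values); the resource group name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssm"
)

func main() {
	// Maintenance window targets only: every resource in a resource group.
	windowTargets := []ssm.Target{
		{Key: aws.String("resource-groups:Name"), Values: []string{"ProductionResourceGroup"}},
	}

	// State Manager association targets only: all managed instances in the
	// Region where the association is created.
	associationTargets := []ssm.Target{
		{Key: aws.String("InstanceIds"), Values: []string{"*"}},
	}

	fmt.Println(windowTargets, associationTargets)
}
```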
// // For information about how to send commands that target instances using Key,Value // parameters, see Using Targets and Rate Controls to Send Commands to a Fleet diff --git a/service/storagegateway/api_enums.go b/service/storagegateway/api_enums.go index 59f981be1d4..051ee03568a 100644 --- a/service/storagegateway/api_enums.go +++ b/service/storagegateway/api_enums.go @@ -2,6 +2,46 @@ package storagegateway +type ActiveDirectoryStatus string + +// Enum values for ActiveDirectoryStatus +const ( + ActiveDirectoryStatusAccessDenied ActiveDirectoryStatus = "ACCESS_DENIED" + ActiveDirectoryStatusDetached ActiveDirectoryStatus = "DETACHED" + ActiveDirectoryStatusJoined ActiveDirectoryStatus = "JOINED" + ActiveDirectoryStatusJoining ActiveDirectoryStatus = "JOINING" + ActiveDirectoryStatusNetworkError ActiveDirectoryStatus = "NETWORK_ERROR" + ActiveDirectoryStatusTimeout ActiveDirectoryStatus = "TIMEOUT" + ActiveDirectoryStatusUnknownError ActiveDirectoryStatus = "UNKNOWN_ERROR" +) + +func (enum ActiveDirectoryStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ActiveDirectoryStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type AvailabilityMonitorTestStatus string + +// Enum values for AvailabilityMonitorTestStatus +const ( + AvailabilityMonitorTestStatusComplete AvailabilityMonitorTestStatus = "COMPLETE" + AvailabilityMonitorTestStatusFailed AvailabilityMonitorTestStatus = "FAILED" + AvailabilityMonitorTestStatusPending AvailabilityMonitorTestStatus = "PENDING" +) + +func (enum AvailabilityMonitorTestStatus) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AvailabilityMonitorTestStatus) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type ErrorCode string // Enum values for ErrorCode @@ -37,6 +77,7 @@ const ( ErrorCodeLocalStorageLimitExceeded ErrorCode = "LocalStorageLimitExceeded" ErrorCodeLunAlreadyAllocated ErrorCode = "LunAlreadyAllocated " ErrorCodeLunInvalid ErrorCode = "LunInvalid" + ErrorCodeJoinDomainInProgress ErrorCode = "JoinDomainInProgress" ErrorCodeMaximumContentLengthExceeded ErrorCode = "MaximumContentLengthExceeded" ErrorCodeMaximumTapeCartridgeCountExceeded ErrorCode = "MaximumTapeCartridgeCountExceeded" ErrorCodeMaximumVolumeCountExceeded ErrorCode = "MaximumVolumeCountExceeded" @@ -96,6 +137,25 @@ func (enum FileShareType) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type HostEnvironment string + +// Enum values for HostEnvironment +const ( + HostEnvironmentVmware HostEnvironment = "VMWARE" + HostEnvironmentHyperV HostEnvironment = "HYPER-V" + HostEnvironmentEc2 HostEnvironment = "EC2" + HostEnvironmentOther HostEnvironment = "OTHER" +) + +func (enum HostEnvironment) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum HostEnvironment) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + // A value that sets the access control list permission for objects in the S3 // bucket that a file gateway puts objects into. The default value is "private". 
type ObjectACL string diff --git a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go index 779a48d93dc..1085befa098 100644 --- a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go +++ b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go @@ -83,7 +83,8 @@ const opDeleteBandwidthRateLimit = "DeleteBandwidthRateLimit" // upload and download bandwidth rate limit, or you can delete both. If you // delete only one of the limits, the other limit remains unchanged. To specify // which gateway to work with, use the Amazon Resource Name (ARN) of the gateway -// in your request. +// in your request. This operation is supported for the stored volume, cached +// volume and tape gateway types. // // // Example sending a request using DeleteBandwidthRateLimitRequest. // req := client.DeleteBandwidthRateLimitRequest(params) diff --git a/service/storagegateway/api_op_DeleteChapCredentials.go b/service/storagegateway/api_op_DeleteChapCredentials.go index a71bc41e03e..a7805af4ca4 100644 --- a/service/storagegateway/api_op_DeleteChapCredentials.go +++ b/service/storagegateway/api_op_DeleteChapCredentials.go @@ -80,7 +80,8 @@ const opDeleteChapCredentials = "DeleteChapCredentials" // AWS Storage Gateway. // // Deletes Challenge-Handshake Authentication Protocol (CHAP) credentials for -// a specified iSCSI target and initiator pair. +// a specified iSCSI target and initiator pair. This operation is supported +// in volume and tape gateway types. // // // Example sending a request using DeleteChapCredentialsRequest. // req := client.DeleteChapCredentialsRequest(params) diff --git a/service/storagegateway/api_op_DescribeAvailabilityMonitorTest.go b/service/storagegateway/api_op_DescribeAvailabilityMonitorTest.go new file mode 100644 index 00000000000..752eaf87cbe --- /dev/null +++ b/service/storagegateway/api_op_DescribeAvailabilityMonitorTest.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package storagegateway + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DescribeAvailabilityMonitorTestInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAvailabilityMonitorTestInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAvailabilityMonitorTestInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DescribeAvailabilityMonitorTestInput"} + + if s.GatewayARN == nil { + invalidParams.Add(aws.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(aws.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeAvailabilityMonitorTestOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` + + // The time the High Availability monitoring test was started. 
If a test hasn't + // been performed, the value of this field is null. + StartTime *time.Time `type:"timestamp"` + + // The status of the High Availability monitoring test. If a test hasn't been + // performed, the value of this field is null. + Status AvailabilityMonitorTestStatus `type:"string" enum:"true"` +} + +// String returns the string representation +func (s DescribeAvailabilityMonitorTestOutput) String() string { + return awsutil.Prettify(s) +} + +const opDescribeAvailabilityMonitorTest = "DescribeAvailabilityMonitorTest" + +// DescribeAvailabilityMonitorTestRequest returns a request value for making API operation for +// AWS Storage Gateway. +// +// Returns information about the most recent High Availability monitoring test +// that was performed on the host in a cluster. If a test isn't performed, the +// status and start time in the response would be null. +// +// // Example sending a request using DescribeAvailabilityMonitorTestRequest. +// req := client.DescribeAvailabilityMonitorTestRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DescribeAvailabilityMonitorTest +func (c *Client) DescribeAvailabilityMonitorTestRequest(input *DescribeAvailabilityMonitorTestInput) DescribeAvailabilityMonitorTestRequest { + op := &aws.Operation{ + Name: opDescribeAvailabilityMonitorTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeAvailabilityMonitorTestInput{} + } + + req := c.newRequest(op, input, &DescribeAvailabilityMonitorTestOutput{}) + return DescribeAvailabilityMonitorTestRequest{Request: req, Input: input, Copy: c.DescribeAvailabilityMonitorTestRequest} +} + +// DescribeAvailabilityMonitorTestRequest is the request type for the +// DescribeAvailabilityMonitorTest API operation. +type DescribeAvailabilityMonitorTestRequest struct { + *aws.Request + Input *DescribeAvailabilityMonitorTestInput + Copy func(*DescribeAvailabilityMonitorTestInput) DescribeAvailabilityMonitorTestRequest +} + +// Send marshals and sends the DescribeAvailabilityMonitorTest API request. +func (r DescribeAvailabilityMonitorTestRequest) Send(ctx context.Context) (*DescribeAvailabilityMonitorTestResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DescribeAvailabilityMonitorTestResponse{ + DescribeAvailabilityMonitorTestOutput: r.Request.Data.(*DescribeAvailabilityMonitorTestOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DescribeAvailabilityMonitorTestResponse is the response type for the +// DescribeAvailabilityMonitorTest API operation. +type DescribeAvailabilityMonitorTestResponse struct { + *DescribeAvailabilityMonitorTestOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DescribeAvailabilityMonitorTest request. +func (r *DescribeAvailabilityMonitorTestResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/storagegateway/api_op_DescribeBandwidthRateLimit.go b/service/storagegateway/api_op_DescribeBandwidthRateLimit.go index 93fdd2fac40..49c8182de4b 100644 --- a/service/storagegateway/api_op_DescribeBandwidthRateLimit.go +++ b/service/storagegateway/api_op_DescribeBandwidthRateLimit.go @@ -70,7 +70,8 @@ const opDescribeBandwidthRateLimit = "DescribeBandwidthRateLimit" // AWS Storage Gateway. 
// // Returns the bandwidth rate limits of a gateway. By default, these limits -// are not set, which means no bandwidth rate limiting is in effect. +// are not set, which means no bandwidth rate limiting is in effect. This operation +// is supported for the stored volume, cached volume and tape gateway types. // // This operation only returns a value for a bandwidth rate limit only if the // limit is set. If no limits are set for the gateway, then this operation returns diff --git a/service/storagegateway/api_op_DescribeChapCredentials.go b/service/storagegateway/api_op_DescribeChapCredentials.go index afdfc9b3b5e..da145ca720a 100644 --- a/service/storagegateway/api_op_DescribeChapCredentials.go +++ b/service/storagegateway/api_op_DescribeChapCredentials.go @@ -77,6 +77,7 @@ const opDescribeChapCredentials = "DescribeChapCredentials" // // Returns an array of Challenge-Handshake Authentication Protocol (CHAP) credentials // information for a specified iSCSI target, one for each target-initiator pair. +// This operation is supported in the volume and tape gateway types. // // // Example sending a request using DescribeChapCredentialsRequest. // req := client.DescribeChapCredentialsRequest(params) diff --git a/service/storagegateway/api_op_DescribeGatewayInformation.go b/service/storagegateway/api_op_DescribeGatewayInformation.go index b0e976200d6..840b808ec03 100644 --- a/service/storagegateway/api_op_DescribeGatewayInformation.go +++ b/service/storagegateway/api_op_DescribeGatewayInformation.go @@ -46,8 +46,8 @@ func (s *DescribeGatewayInformationInput) Validate() error { type DescribeGatewayInformationOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was - // used to monitor and log events in the gateway. + // The Amazon Resource Name (ARN) of the Amazon CloudWatch Log Group that is + // used to monitor events in the gateway. CloudWatchLogGroupARN *string `type:"string"` // The ID of the Amazon EC2 instance that was used to launch the gateway. @@ -81,6 +81,9 @@ type DescribeGatewayInformationOutput struct { // The type of the gateway. GatewayType *string `min:"2" type:"string"` + // The type of hypervisor environment used by the host. + HostEnvironment HostEnvironment `type:"string" enum:"true"` + // The date on which the last software update was applied to the gateway. If // the gateway has never been updated, this field does not return a value in // the response. diff --git a/service/storagegateway/api_op_DescribeSMBSettings.go b/service/storagegateway/api_op_DescribeSMBSettings.go index 33546a03053..d1e3837eb56 100644 --- a/service/storagegateway/api_op_DescribeSMBSettings.go +++ b/service/storagegateway/api_op_DescribeSMBSettings.go @@ -44,6 +44,28 @@ func (s *DescribeSMBSettingsInput) Validate() error { type DescribeSMBSettingsOutput struct { _ struct{} `type:"structure"` + // Indicates the status of a gateway that is a member of the Active Directory + // domain. + // + // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to + // an authentication error. + // + // * DETACHED: Indicates that gateway is not joined to a domain. + // + // * JOINED: Indicates that the gateway has successfully joined a domain. + // + // * JOINING: Indicates that a JoinDomain operation is in progress. + // + // * NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network + // or connectivity error.
+ // + // * TIMEOUT: Indicates that the JoinDomain operation failed because the + // operation didn't complete within the allotted time. + // + // * UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to + // another type of error. + ActiveDirectoryStatus ActiveDirectoryStatus `type:"string" enum:"true"` + // The name of the domain that the gateway is joined to. DomainName *string `min:"1" type:"string"` diff --git a/service/storagegateway/api_op_DetachVolume.go b/service/storagegateway/api_op_DetachVolume.go index e83c0ad97da..0a952ffbf38 100644 --- a/service/storagegateway/api_op_DetachVolume.go +++ b/service/storagegateway/api_op_DetachVolume.go @@ -68,7 +68,8 @@ const opDetachVolume = "DetachVolume" // from the specified gateway. Detaching and attaching a volume enables you // to recover your data from one gateway to a different gateway without creating // a snapshot. It also makes it easier to move your volumes from an on-premises -// gateway to a gateway hosted on an Amazon EC2 instance. +// gateway to a gateway hosted on an Amazon EC2 instance. This operation is +// only supported in the volume gateway type. // // // Example sending a request using DetachVolumeRequest. // req := client.DetachVolumeRequest(params) diff --git a/service/storagegateway/api_op_JoinDomain.go b/service/storagegateway/api_op_JoinDomain.go index 2e7d7e3e9ed..e467b8dbb1f 100644 --- a/service/storagegateway/api_op_JoinDomain.go +++ b/service/storagegateway/api_op_JoinDomain.go @@ -40,8 +40,14 @@ type JoinDomainInput struct { // Password is a required field Password *string `min:"1" type:"string" required:"true" sensitive:"true"` + // Specifies the time in seconds, in which the JoinDomain operation must complete. + // The default is 20 seconds. + TimeoutInSeconds *int64 `type:"integer"` + // Sets the user name of user who has permission to add the gateway to the Active - // Directory domain. + // Directory domain. The domain user account should be enabled to join computers + // to the domain. For example, you can use the domain administrator account + // or an account with delegated permissions to join computers to the domain. // // UserName is a required field UserName *string `min:"1" type:"string" required:"true"` @@ -97,6 +103,27 @@ func (s *JoinDomainInput) Validate() error { type JoinDomainOutput struct { _ struct{} `type:"structure"` + // Indicates the status of the gateway as a member of the Active Directory domain. + // + // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to + // an authentication error. + // + // * DETACHED: Indicates that gateway is not joined to a domain. + // + // * JOINED: Indicates that the gateway has successfully joined a domain. + // + // * JOINING: Indicates that a JoinDomain operation is in progress. + // + // * NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network + // or connectivity error. + // + // * TIMEOUT: Indicates that the JoinDomain operation failed because the + // operation didn't complete within the allotted time. + // + // * UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to + // another type of error. + ActiveDirectoryStatus ActiveDirectoryStatus `type:"string" enum:"true"` + // The unique Amazon Resource Name (ARN) of the gateway that joined the domain. 
GatewayARN *string `min:"50" type:"string"` } diff --git a/service/storagegateway/api_op_ListTagsForResource.go b/service/storagegateway/api_op_ListTagsForResource.go index b5daff915fa..5de2c8535b3 100644 --- a/service/storagegateway/api_op_ListTagsForResource.go +++ b/service/storagegateway/api_op_ListTagsForResource.go @@ -83,7 +83,7 @@ const opListTagsForResource = "ListTagsForResource" // AWS Storage Gateway. // // Lists the tags that have been added to the specified resource. This operation -// is only supported in the cached volume, stored volume and tape gateway type. +// is supported in storage gateways of all types. // // // Example sending a request using ListTagsForResourceRequest. // req := client.ListTagsForResourceRequest(params) diff --git a/service/storagegateway/api_op_RefreshCache.go b/service/storagegateway/api_op_RefreshCache.go index ea5448dbbdf..6ba8a5ad7b7 100644 --- a/service/storagegateway/api_op_RefreshCache.go +++ b/service/storagegateway/api_op_RefreshCache.go @@ -95,6 +95,17 @@ const opRefreshCache = "RefreshCache" // for new files on the gateway file share. You can subscribe to be notified // through an CloudWatch event when your RefreshCache operation completes. // +// Throttle limit: This API is asynchronous so the gateway will accept no more +// than two refreshes at any time. We recommend using the refresh-complete CloudWatch +// event notification before issuing additional requests. For more information, +// see Getting Notified About File Operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification). +// +// If you invoke the RefreshCache API when two requests are already being processed, +// any new request will cause an InvalidGatewayRequestException error because +// too many requests were sent to the server. +// +// For more information, see "https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification". +// // // Example sending a request using RefreshCacheRequest. // req := client.RefreshCacheRequest(params) // resp, err := req.Send(context.TODO()) diff --git a/service/storagegateway/api_op_RemoveTagsFromResource.go b/service/storagegateway/api_op_RemoveTagsFromResource.go index 8cd4bc0fee3..afd2626e03f 100644 --- a/service/storagegateway/api_op_RemoveTagsFromResource.go +++ b/service/storagegateway/api_op_RemoveTagsFromResource.go @@ -71,8 +71,8 @@ const opRemoveTagsFromResource = "RemoveTagsFromResource" // RemoveTagsFromResourceRequest returns a request value for making API operation for // AWS Storage Gateway. // -// Removes one or more tags from the specified resource. This operation is only -// supported in the cached volume, stored volume and tape gateway types. +// Removes one or more tags from the specified resource. This operation is supported +// in storage gateways of all types. // // // Example sending a request using RemoveTagsFromResourceRequest. // req := client.RemoveTagsFromResourceRequest(params) diff --git a/service/storagegateway/api_op_StartAvailabilityMonitorTest.go b/service/storagegateway/api_op_StartAvailabilityMonitorTest.go new file mode 100644 index 00000000000..6a98fb6671a --- /dev/null +++ b/service/storagegateway/api_op_StartAvailabilityMonitorTest.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package storagegateway + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type StartAvailabilityMonitorTestInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartAvailabilityMonitorTestInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartAvailabilityMonitorTestInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "StartAvailabilityMonitorTestInput"} + + if s.GatewayARN == nil { + invalidParams.Add(aws.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(aws.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type StartAvailabilityMonitorTestOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s StartAvailabilityMonitorTestOutput) String() string { + return awsutil.Prettify(s) +} + +const opStartAvailabilityMonitorTest = "StartAvailabilityMonitorTest" + +// StartAvailabilityMonitorTestRequest returns a request value for making API operation for +// AWS Storage Gateway. +// +// Start a test that verifies that the specified gateway is configured for High +// Availability monitoring in your host environment. This request only initiates +// the test and that a successful response only indicates that the test was +// started. It doesn't indicate that the test passed. For the status of the +// test, invoke the DescribeAvailabilityMonitorTest API. +// +// Starting this test will cause your gateway to go offline for a brief period. +// +// // Example sending a request using StartAvailabilityMonitorTestRequest. +// req := client.StartAvailabilityMonitorTestRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/StartAvailabilityMonitorTest +func (c *Client) StartAvailabilityMonitorTestRequest(input *StartAvailabilityMonitorTestInput) StartAvailabilityMonitorTestRequest { + op := &aws.Operation{ + Name: opStartAvailabilityMonitorTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartAvailabilityMonitorTestInput{} + } + + req := c.newRequest(op, input, &StartAvailabilityMonitorTestOutput{}) + return StartAvailabilityMonitorTestRequest{Request: req, Input: input, Copy: c.StartAvailabilityMonitorTestRequest} +} + +// StartAvailabilityMonitorTestRequest is the request type for the +// StartAvailabilityMonitorTest API operation. +type StartAvailabilityMonitorTestRequest struct { + *aws.Request + Input *StartAvailabilityMonitorTestInput + Copy func(*StartAvailabilityMonitorTestInput) StartAvailabilityMonitorTestRequest +} + +// Send marshals and sends the StartAvailabilityMonitorTest API request. 
+func (r StartAvailabilityMonitorTestRequest) Send(ctx context.Context) (*StartAvailabilityMonitorTestResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &StartAvailabilityMonitorTestResponse{ + StartAvailabilityMonitorTestOutput: r.Request.Data.(*StartAvailabilityMonitorTestOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// StartAvailabilityMonitorTestResponse is the response type for the +// StartAvailabilityMonitorTest API operation. +type StartAvailabilityMonitorTestResponse struct { + *StartAvailabilityMonitorTestOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// StartAvailabilityMonitorTest request. +func (r *StartAvailabilityMonitorTestResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go index 758ee28a3a5..57b7659b908 100644 --- a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go +++ b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go @@ -80,7 +80,8 @@ const opUpdateBandwidthRateLimit = "UpdateBandwidthRateLimit" // // Updates the bandwidth rate limits of a gateway. You can update both the upload // and download bandwidth rate limit or specify only one of the two. If you -// don't set a bandwidth rate limit, the existing rate limit remains. +// don't set a bandwidth rate limit, the existing rate limit remains. This operation +// is supported for the stored volume, cached volume and tape gateway types.' // // By default, a gateway's bandwidth rate limits are not set. If you don't set // any limit, the gateway does not have any limitations on its bandwidth usage diff --git a/service/storagegateway/api_op_UpdateChapCredentials.go b/service/storagegateway/api_op_UpdateChapCredentials.go index d9dc7ab8ccd..2a21ee832ae 100644 --- a/service/storagegateway/api_op_UpdateChapCredentials.go +++ b/service/storagegateway/api_op_UpdateChapCredentials.go @@ -113,7 +113,8 @@ const opUpdateChapCredentials = "UpdateChapCredentials" // // Updates the Challenge-Handshake Authentication Protocol (CHAP) credentials // for a specified iSCSI target. By default, a gateway does not have CHAP enabled; -// however, for added security, you might use it. +// however, for added security, you might use it. This operation is supported +// in the volume and tape gateway types. // // When you update CHAP credentials, all existing connections on the target // are closed and initiators must reconnect with the new credentials. 
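The Storage Gateway hunks above add a TimeoutInSeconds option and an ActiveDirectoryStatus result to JoinDomain, and introduce the new StartAvailabilityMonitorTest operation. Below is a minimal sketch of how a caller might exercise these additions with this release; the gateway ARN, domain name, and credentials are placeholder values, and configuration loading via external.LoadDefaultAWSConfig is an assumption rather than something taken from the diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := storagegateway.New(cfg)

	// JoinDomain now accepts TimeoutInSeconds and reports ActiveDirectoryStatus.
	// All identifiers and credentials below are placeholders.
	joinReq := svc.JoinDomainRequest(&storagegateway.JoinDomainInput{
		GatewayARN:       aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
		DomainName:       aws.String("corp.example.com"),
		UserName:         aws.String("admin"),
		Password:         aws.String("example-password"),
		TimeoutInSeconds: aws.Int64(60), // new field; the service default is 20 seconds
	})
	joinResp, err := joinReq.Send(context.TODO())
	if err != nil {
		log.Fatalf("JoinDomain failed, %v", err)
	}
	fmt.Println("join status:", joinResp.ActiveDirectoryStatus) // e.g. JOINING or JOINED

	// StartAvailabilityMonitorTest only starts the test; a successful response
	// does not mean the gateway passed.
	testReq := svc.StartAvailabilityMonitorTestRequest(&storagegateway.StartAvailabilityMonitorTestInput{
		GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
	})
	if _, err := testReq.Send(context.TODO()); err != nil {
		log.Fatalf("StartAvailabilityMonitorTest failed, %v", err)
	}
}
```

Because StartAvailabilityMonitorTest only kicks off the check, a real caller would poll DescribeAvailabilityMonitorTest afterwards to learn whether the gateway passed, keeping in mind that the test briefly takes the gateway offline.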
diff --git a/service/storagegateway/storagegatewayiface/interface.go b/service/storagegateway/storagegatewayiface/interface.go index 96cfb5753ab..7f8398d36c1 100644 --- a/service/storagegateway/storagegatewayiface/interface.go +++ b/service/storagegateway/storagegatewayiface/interface.go @@ -111,6 +111,8 @@ type ClientAPI interface { DeleteVolumeRequest(*storagegateway.DeleteVolumeInput) storagegateway.DeleteVolumeRequest + DescribeAvailabilityMonitorTestRequest(*storagegateway.DescribeAvailabilityMonitorTestInput) storagegateway.DescribeAvailabilityMonitorTestRequest + DescribeBandwidthRateLimitRequest(*storagegateway.DescribeBandwidthRateLimitInput) storagegateway.DescribeBandwidthRateLimitRequest DescribeCacheRequest(*storagegateway.DescribeCacheInput) storagegateway.DescribeCacheRequest @@ -185,6 +187,8 @@ type ClientAPI interface { ShutdownGatewayRequest(*storagegateway.ShutdownGatewayInput) storagegateway.ShutdownGatewayRequest + StartAvailabilityMonitorTestRequest(*storagegateway.StartAvailabilityMonitorTestInput) storagegateway.StartAvailabilityMonitorTestRequest + StartGatewayRequest(*storagegateway.StartGatewayInput) storagegateway.StartGatewayRequest UpdateBandwidthRateLimitRequest(*storagegateway.UpdateBandwidthRateLimitInput) storagegateway.UpdateBandwidthRateLimitRequest diff --git a/service/support/api_integ_test.go b/service/support/api_integ_test.go index 3d6f305e01f..8784a2ddb9a 100644 --- a/service/support/api_integ_test.go +++ b/service/support/api_integ_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" "github.com/aws/aws-sdk-go-v2/internal/awstesting/integration" "github.com/aws/aws-sdk-go-v2/service/support" ) @@ -27,7 +28,7 @@ func TestInteg_00_DescribeServices(t *testing.T) { params := &support.DescribeServicesInput{} req := svc.DescribeServicesRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err != nil { t.Errorf("expect no error, got %v", err) @@ -48,7 +49,7 @@ func TestInteg_01_CreateCase(t *testing.T) { } req := svc.CreateCaseRequest(params) - + req.Handlers.Validate.Remove(defaults.ValidateParametersHandler) _, err := req.Send(ctx) if err == nil { t.Fatalf("expect request to fail") diff --git a/service/transcribe/api_types.go b/service/transcribe/api_types.go index bb6ad8eab37..7678488e48c 100644 --- a/service/transcribe/api_types.go +++ b/service/transcribe/api_types.go @@ -66,12 +66,22 @@ type Settings struct { // request. If you set both, your request returns a BadRequestException. ChannelIdentification *bool `type:"boolean"` + // The number of alternative transcriptions that the service should return. + // If you specify the MaxAlternatives field, you must set the ShowAlternatives + // field to true. + MaxAlternatives *int64 `min:"2" type:"integer"` + // The maximum number of speakers to identify in the input audio. If there are // more speakers in the audio than this number, multiple speakers will be identified // as a single speaker. If you specify the MaxSpeakerLabels field, you must // set the ShowSpeakerLabels field to true. MaxSpeakerLabels *int64 `min:"2" type:"integer"` + // Determines whether the transcription contains alternative transcriptions. + // If you set the ShowAlternatives field to true, you must also set the maximum + // number of alternatives to return in the MaxAlternatives field. 
+ ShowAlternatives *bool `type:"boolean"` + // Determines whether the transcription job uses speaker recognition to identify // different speakers in the input audio. Speaker recognition labels individual // speakers in the audio file. If you set the ShowSpeakerLabels field to true, @@ -93,6 +103,9 @@ func (s Settings) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Settings) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "Settings"} + if s.MaxAlternatives != nil && *s.MaxAlternatives < 2 { + invalidParams.Add(aws.NewErrParamMinValue("MaxAlternatives", 2)) + } if s.MaxSpeakerLabels != nil && *s.MaxSpeakerLabels < 2 { invalidParams.Add(aws.NewErrParamMinValue("MaxSpeakerLabels", 2)) } diff --git a/service/workspaces/api_enums.go b/service/workspaces/api_enums.go index 3efadd07f61..95e7d8f94f0 100644 --- a/service/workspaces/api_enums.go +++ b/service/workspaces/api_enums.go @@ -2,6 +2,23 @@ package workspaces +type AccessPropertyValue string + +// Enum values for AccessPropertyValue +const ( + AccessPropertyValueAllow AccessPropertyValue = "ALLOW" + AccessPropertyValueDeny AccessPropertyValue = "DENY" +) + +func (enum AccessPropertyValue) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AccessPropertyValue) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type Compute string // Enum values for Compute @@ -196,6 +213,23 @@ func (enum TargetWorkspaceState) MarshalValueBuf(b []byte) ([]byte, error) { return append(b, enum...), nil } +type Tenancy string + +// Enum values for Tenancy +const ( + TenancyDedicated Tenancy = "DEDICATED" + TenancyShared Tenancy = "SHARED" +) + +func (enum Tenancy) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum Tenancy) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + type WorkspaceDirectoryState string // Enum values for WorkspaceDirectoryState diff --git a/service/workspaces/api_errors.go b/service/workspaces/api_errors.go index ca1e70285ad..770e7980bb3 100644 --- a/service/workspaces/api_errors.go +++ b/service/workspaces/api_errors.go @@ -71,10 +71,29 @@ const ( // The specified resource is not available. ErrCodeResourceUnavailableException = "ResourceUnavailableException" + // ErrCodeUnsupportedNetworkConfigurationException for service response error code + // "UnsupportedNetworkConfigurationException". + // + // The configuration of this network is not supported for this operation, or + // your network configuration conflicts with the Amazon WorkSpaces management + // network IP range. For more information, see Configure a VPC for Amazon WorkSpaces + // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). + ErrCodeUnsupportedNetworkConfigurationException = "UnsupportedNetworkConfigurationException" + // ErrCodeUnsupportedWorkspaceConfigurationException for service response error code // "UnsupportedWorkspaceConfigurationException". // // The configuration of this WorkSpace is not supported for this operation. - // For more information, see the Amazon WorkSpaces Administration Guide (https://docs.aws.amazon.com/workspaces/latest/adminguide/). + // For more information, see Required Configuration and Service Components for + // WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/required-service-components.html). 
ErrCodeUnsupportedWorkspaceConfigurationException = "UnsupportedWorkspaceConfigurationException" + + // ErrCodeWorkspacesDefaultRoleNotFoundException for service response error code + // "WorkspacesDefaultRoleNotFoundException". + // + // The workspaces_DefaultRole role could not be found. If this is the first + // time you are registering a directory, you will need to create the workspaces_DefaultRole + // role before you can register a directory. For more information, see Creating + // the workspaces_DefaultRole Role (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role). + ErrCodeWorkspacesDefaultRoleNotFoundException = "WorkspacesDefaultRoleNotFoundException" ) diff --git a/service/workspaces/api_op_AssociateIpGroups.go b/service/workspaces/api_op_AssociateIpGroups.go index 9482290e457..0a7888b4d22 100644 --- a/service/workspaces/api_op_AssociateIpGroups.go +++ b/service/workspaces/api_op_AssociateIpGroups.go @@ -15,7 +15,7 @@ type AssociateIpGroupsInput struct { // The identifier of the directory. // // DirectoryId is a required field - DirectoryId *string `type:"string" required:"true"` + DirectoryId *string `min:"10" type:"string" required:"true"` // The identifiers of one or more IP access control groups. // @@ -35,6 +35,9 @@ func (s *AssociateIpGroupsInput) Validate() error { if s.DirectoryId == nil { invalidParams.Add(aws.NewErrParamRequired("DirectoryId")) } + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } if s.GroupIds == nil { invalidParams.Add(aws.NewErrParamRequired("GroupIds")) diff --git a/service/workspaces/api_op_DeregisterWorkspaceDirectory.go b/service/workspaces/api_op_DeregisterWorkspaceDirectory.go new file mode 100644 index 00000000000..67e8d289daa --- /dev/null +++ b/service/workspaces/api_op_DeregisterWorkspaceDirectory.go @@ -0,0 +1,122 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type DeregisterWorkspaceDirectoryInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. If any WorkSpaces are registered to this + // directory, you must remove them before you deregister the directory, or you + // will receive an OperationNotSupportedException error. + // + // DirectoryId is a required field + DirectoryId *string `min:"10" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterWorkspaceDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeregisterWorkspaceDirectoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeregisterWorkspaceDirectoryInput"} + + if s.DirectoryId == nil { + invalidParams.Add(aws.NewErrParamRequired("DirectoryId")) + } + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterWorkspaceDirectoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterWorkspaceDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeregisterWorkspaceDirectory = "DeregisterWorkspaceDirectory" + +// DeregisterWorkspaceDirectoryRequest returns a request value for making API operation for +// Amazon WorkSpaces. +// +// Deregisters the specified directory. This operation is asynchronous and returns +// before the WorkSpace directory is deregistered. If any WorkSpaces are registered +// to this directory, you must remove them before you can deregister the directory. +// +// // Example sending a request using DeregisterWorkspaceDirectoryRequest. +// req := client.DeregisterWorkspaceDirectoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeregisterWorkspaceDirectory +func (c *Client) DeregisterWorkspaceDirectoryRequest(input *DeregisterWorkspaceDirectoryInput) DeregisterWorkspaceDirectoryRequest { + op := &aws.Operation{ + Name: opDeregisterWorkspaceDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterWorkspaceDirectoryInput{} + } + + req := c.newRequest(op, input, &DeregisterWorkspaceDirectoryOutput{}) + return DeregisterWorkspaceDirectoryRequest{Request: req, Input: input, Copy: c.DeregisterWorkspaceDirectoryRequest} +} + +// DeregisterWorkspaceDirectoryRequest is the request type for the +// DeregisterWorkspaceDirectory API operation. +type DeregisterWorkspaceDirectoryRequest struct { + *aws.Request + Input *DeregisterWorkspaceDirectoryInput + Copy func(*DeregisterWorkspaceDirectoryInput) DeregisterWorkspaceDirectoryRequest +} + +// Send marshals and sends the DeregisterWorkspaceDirectory API request. +func (r DeregisterWorkspaceDirectoryRequest) Send(ctx context.Context) (*DeregisterWorkspaceDirectoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeregisterWorkspaceDirectoryResponse{ + DeregisterWorkspaceDirectoryOutput: r.Request.Data.(*DeregisterWorkspaceDirectoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeregisterWorkspaceDirectoryResponse is the response type for the +// DeregisterWorkspaceDirectory API operation. +type DeregisterWorkspaceDirectoryResponse struct { + *DeregisterWorkspaceDirectoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeregisterWorkspaceDirectory request. 
+func (r *DeregisterWorkspaceDirectoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/workspaces/api_op_DescribeAccount.go b/service/workspaces/api_op_DescribeAccount.go index d3ad1c8e21f..4c385e5ea02 100644 --- a/service/workspaces/api_op_DescribeAccount.go +++ b/service/workspaces/api_op_DescribeAccount.go @@ -44,7 +44,7 @@ const opDescribeAccount = "DescribeAccount" // DescribeAccountRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Retrieves a list that describes the configuration of bring your own license +// Retrieves a list that describes the configuration of Bring Your Own License // (BYOL) for the specified account. // // // Example sending a request using DescribeAccountRequest. diff --git a/service/workspaces/api_op_DescribeAccountModifications.go b/service/workspaces/api_op_DescribeAccountModifications.go index b5985198152..3e582ac8f12 100644 --- a/service/workspaces/api_op_DescribeAccountModifications.go +++ b/service/workspaces/api_op_DescribeAccountModifications.go @@ -56,8 +56,8 @@ const opDescribeAccountModifications = "DescribeAccountModifications" // DescribeAccountModificationsRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Retrieves a list that describes modifications to the configuration of bring -// your own license (BYOL) for the specified account. +// Retrieves a list that describes modifications to the configuration of Bring +// Your Own License (BYOL) for the specified account. // // // Example sending a request using DescribeAccountModificationsRequest. // req := client.DescribeAccountModificationsRequest(params) diff --git a/service/workspaces/api_op_DescribeWorkspaceDirectories.go b/service/workspaces/api_op_DescribeWorkspaceDirectories.go index ec1026bb8b4..5faa5e11009 100644 --- a/service/workspaces/api_op_DescribeWorkspaceDirectories.go +++ b/service/workspaces/api_op_DescribeWorkspaceDirectories.go @@ -16,6 +16,9 @@ type DescribeWorkspaceDirectoriesInput struct { // are retrieved. DirectoryIds []string `min:"1" type:"list"` + // The maximum number of directories to return. + Limit *int64 `min:"1" type:"integer"` + // If you received a NextToken from a previous call that was paginated, provide // this token to receive the next set of results. NextToken *string `min:"1" type:"string"` @@ -32,6 +35,9 @@ func (s *DescribeWorkspaceDirectoriesInput) Validate() error { if s.DirectoryIds != nil && len(s.DirectoryIds) < 1 { invalidParams.Add(aws.NewErrParamMinLen("DirectoryIds", 1)) } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(aws.NewErrParamMinValue("Limit", 1)) + } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(aws.NewErrParamMinLen("NextToken", 1)) } @@ -63,8 +69,7 @@ const opDescribeWorkspaceDirectories = "DescribeWorkspaceDirectories" // DescribeWorkspaceDirectoriesRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Describes the available AWS Directory Service directories that are registered -// with Amazon WorkSpaces. +// Describes the available directories that are registered with Amazon WorkSpaces. // // // Example sending a request using DescribeWorkspaceDirectoriesRequest. 
// req := client.DescribeWorkspaceDirectoriesRequest(params) diff --git a/service/workspaces/api_op_DescribeWorkspaces.go b/service/workspaces/api_op_DescribeWorkspaces.go index 0828154e4cb..359216499a3 100644 --- a/service/workspaces/api_op_DescribeWorkspaces.go +++ b/service/workspaces/api_op_DescribeWorkspaces.go @@ -19,7 +19,7 @@ type DescribeWorkspacesInput struct { // The identifier of the directory. In addition, you can optionally specify // a specific directory user (see UserName). You cannot combine this parameter // with any other filter. - DirectoryId *string `type:"string"` + DirectoryId *string `min:"10" type:"string"` // The maximum number of items to return. Limit *int64 `min:"1" type:"integer"` @@ -48,6 +48,9 @@ func (s DescribeWorkspacesInput) String() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeWorkspacesInput) Validate() error { invalidParams := aws.ErrInvalidParams{Context: "DescribeWorkspacesInput"} + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(aws.NewErrParamMinValue("Limit", 1)) } diff --git a/service/workspaces/api_op_DisassociateIpGroups.go b/service/workspaces/api_op_DisassociateIpGroups.go index fc3c28a0d56..23231e29c2e 100644 --- a/service/workspaces/api_op_DisassociateIpGroups.go +++ b/service/workspaces/api_op_DisassociateIpGroups.go @@ -15,7 +15,7 @@ type DisassociateIpGroupsInput struct { // The identifier of the directory. // // DirectoryId is a required field - DirectoryId *string `type:"string" required:"true"` + DirectoryId *string `min:"10" type:"string" required:"true"` // The identifiers of one or more IP access control groups. // @@ -35,6 +35,9 @@ func (s *DisassociateIpGroupsInput) Validate() error { if s.DirectoryId == nil { invalidParams.Add(aws.NewErrParamRequired("DirectoryId")) } + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } if s.GroupIds == nil { invalidParams.Add(aws.NewErrParamRequired("GroupIds")) diff --git a/service/workspaces/api_op_ImportWorkspaceImage.go b/service/workspaces/api_op_ImportWorkspaceImage.go index c4bef198e2e..65560025376 100644 --- a/service/workspaces/api_op_ImportWorkspaceImage.go +++ b/service/workspaces/api_op_ImportWorkspaceImage.go @@ -97,7 +97,7 @@ const opImportWorkspaceImage = "ImportWorkspaceImage" // ImportWorkspaceImageRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Imports the specified Windows 7 or Windows 10 bring your own license (BYOL) +// Imports the specified Windows 7 or Windows 10 Bring Your Own License (BYOL) // image into Amazon WorkSpaces. The image must be an already licensed EC2 image // that is in your AWS account, and you must own the image. // diff --git a/service/workspaces/api_op_ListAvailableManagementCidrRanges.go b/service/workspaces/api_op_ListAvailableManagementCidrRanges.go index ef01d4fbbbd..e7b08038bea 100644 --- a/service/workspaces/api_op_ListAvailableManagementCidrRanges.go +++ b/service/workspaces/api_op_ListAvailableManagementCidrRanges.go @@ -74,8 +74,8 @@ const opListAvailableManagementCidrRanges = "ListAvailableManagementCidrRanges" // Amazon WorkSpaces. // // Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that -// you can use for the network management interface when you enable bring your -// own license (BYOL). 
+// you can use for the network management interface when you enable Bring Your +// Own License (BYOL). // // The management network interface is connected to a secure Amazon WorkSpaces // management network. It is used for interactive streaming of the WorkSpace diff --git a/service/workspaces/api_op_ModifyAccount.go b/service/workspaces/api_op_ModifyAccount.go index 7b63ff4a124..fe93d08fbcd 100644 --- a/service/workspaces/api_op_ModifyAccount.go +++ b/service/workspaces/api_op_ModifyAccount.go @@ -43,7 +43,7 @@ const opModifyAccount = "ModifyAccount" // ModifyAccountRequest returns a request value for making API operation for // Amazon WorkSpaces. // -// Modifies the configuration of bring your own license (BYOL) for the specified +// Modifies the configuration of Bring Your Own License (BYOL) for the specified // account. // // // Example sending a request using ModifyAccountRequest. diff --git a/service/workspaces/api_op_ModifySelfservicePermissions.go b/service/workspaces/api_op_ModifySelfservicePermissions.go new file mode 100644 index 00000000000..eeddbb6bd40 --- /dev/null +++ b/service/workspaces/api_op_ModifySelfservicePermissions.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ModifySelfservicePermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. + // + // ResourceId is a required field + ResourceId *string `min:"10" type:"string" required:"true"` + + // The permissions to enable or disable self-service capabilities. + // + // SelfservicePermissions is a required field + SelfservicePermissions *SelfservicePermissions `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ModifySelfservicePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifySelfservicePermissionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ModifySelfservicePermissionsInput"} + + if s.ResourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceId", 10)) + } + + if s.SelfservicePermissions == nil { + invalidParams.Add(aws.NewErrParamRequired("SelfservicePermissions")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifySelfservicePermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifySelfservicePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +const opModifySelfservicePermissions = "ModifySelfservicePermissions" + +// ModifySelfservicePermissionsRequest returns a request value for making API operation for +// Amazon WorkSpaces. +// +// Modifies the self-service WorkSpace management capabilities for your users. +// For more information, see Enable Self-Service WorkSpace Management Capabilities +// for Your Users (https://docs.aws.amazon.com/workspaces/latest/adminguide/enable-user-self-service-workspace-management.html). +// +// // Example sending a request using ModifySelfservicePermissionsRequest. 
+// req := client.ModifySelfservicePermissionsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifySelfservicePermissions +func (c *Client) ModifySelfservicePermissionsRequest(input *ModifySelfservicePermissionsInput) ModifySelfservicePermissionsRequest { + op := &aws.Operation{ + Name: opModifySelfservicePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifySelfservicePermissionsInput{} + } + + req := c.newRequest(op, input, &ModifySelfservicePermissionsOutput{}) + return ModifySelfservicePermissionsRequest{Request: req, Input: input, Copy: c.ModifySelfservicePermissionsRequest} +} + +// ModifySelfservicePermissionsRequest is the request type for the +// ModifySelfservicePermissions API operation. +type ModifySelfservicePermissionsRequest struct { + *aws.Request + Input *ModifySelfservicePermissionsInput + Copy func(*ModifySelfservicePermissionsInput) ModifySelfservicePermissionsRequest +} + +// Send marshals and sends the ModifySelfservicePermissions API request. +func (r ModifySelfservicePermissionsRequest) Send(ctx context.Context) (*ModifySelfservicePermissionsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ModifySelfservicePermissionsResponse{ + ModifySelfservicePermissionsOutput: r.Request.Data.(*ModifySelfservicePermissionsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ModifySelfservicePermissionsResponse is the response type for the +// ModifySelfservicePermissions API operation. +type ModifySelfservicePermissionsResponse struct { + *ModifySelfservicePermissionsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ModifySelfservicePermissions request. +func (r *ModifySelfservicePermissionsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/workspaces/api_op_ModifyWorkspaceAccessProperties.go b/service/workspaces/api_op_ModifyWorkspaceAccessProperties.go new file mode 100644 index 00000000000..9a13dc4a126 --- /dev/null +++ b/service/workspaces/api_op_ModifyWorkspaceAccessProperties.go @@ -0,0 +1,128 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ModifyWorkspaceAccessPropertiesInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. + // + // ResourceId is a required field + ResourceId *string `min:"10" type:"string" required:"true"` + + // The device types and operating systems to enable or disable for access. + // + // WorkspaceAccessProperties is a required field + WorkspaceAccessProperties *WorkspaceAccessProperties `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ModifyWorkspaceAccessPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyWorkspaceAccessPropertiesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ModifyWorkspaceAccessPropertiesInput"} + + if s.ResourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceId", 10)) + } + + if s.WorkspaceAccessProperties == nil { + invalidParams.Add(aws.NewErrParamRequired("WorkspaceAccessProperties")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyWorkspaceAccessPropertiesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyWorkspaceAccessPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +const opModifyWorkspaceAccessProperties = "ModifyWorkspaceAccessProperties" + +// ModifyWorkspaceAccessPropertiesRequest returns a request value for making API operation for +// Amazon WorkSpaces. +// +// Specifies which devices and operating systems users can use to access their +// Workspaces. For more information, see Control Device Access (https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html#control-device-access). +// +// // Example sending a request using ModifyWorkspaceAccessPropertiesRequest. +// req := client.ModifyWorkspaceAccessPropertiesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceAccessProperties +func (c *Client) ModifyWorkspaceAccessPropertiesRequest(input *ModifyWorkspaceAccessPropertiesInput) ModifyWorkspaceAccessPropertiesRequest { + op := &aws.Operation{ + Name: opModifyWorkspaceAccessProperties, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyWorkspaceAccessPropertiesInput{} + } + + req := c.newRequest(op, input, &ModifyWorkspaceAccessPropertiesOutput{}) + return ModifyWorkspaceAccessPropertiesRequest{Request: req, Input: input, Copy: c.ModifyWorkspaceAccessPropertiesRequest} +} + +// ModifyWorkspaceAccessPropertiesRequest is the request type for the +// ModifyWorkspaceAccessProperties API operation. +type ModifyWorkspaceAccessPropertiesRequest struct { + *aws.Request + Input *ModifyWorkspaceAccessPropertiesInput + Copy func(*ModifyWorkspaceAccessPropertiesInput) ModifyWorkspaceAccessPropertiesRequest +} + +// Send marshals and sends the ModifyWorkspaceAccessProperties API request. +func (r ModifyWorkspaceAccessPropertiesRequest) Send(ctx context.Context) (*ModifyWorkspaceAccessPropertiesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ModifyWorkspaceAccessPropertiesResponse{ + ModifyWorkspaceAccessPropertiesOutput: r.Request.Data.(*ModifyWorkspaceAccessPropertiesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ModifyWorkspaceAccessPropertiesResponse is the response type for the +// ModifyWorkspaceAccessProperties API operation. +type ModifyWorkspaceAccessPropertiesResponse struct { + *ModifyWorkspaceAccessPropertiesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ModifyWorkspaceAccessProperties request. 
+func (r *ModifyWorkspaceAccessPropertiesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/workspaces/api_op_ModifyWorkspaceCreationProperties.go b/service/workspaces/api_op_ModifyWorkspaceCreationProperties.go new file mode 100644 index 00000000000..db2c3aec317 --- /dev/null +++ b/service/workspaces/api_op_ModifyWorkspaceCreationProperties.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type ModifyWorkspaceCreationPropertiesInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. + // + // ResourceId is a required field + ResourceId *string `min:"10" type:"string" required:"true"` + + // The default properties for creating WorkSpaces. + // + // WorkspaceCreationProperties is a required field + WorkspaceCreationProperties *WorkspaceCreationProperties `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ModifyWorkspaceCreationPropertiesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyWorkspaceCreationPropertiesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ModifyWorkspaceCreationPropertiesInput"} + + if s.ResourceId == nil { + invalidParams.Add(aws.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceId", 10)) + } + + if s.WorkspaceCreationProperties == nil { + invalidParams.Add(aws.NewErrParamRequired("WorkspaceCreationProperties")) + } + if s.WorkspaceCreationProperties != nil { + if err := s.WorkspaceCreationProperties.Validate(); err != nil { + invalidParams.AddNested("WorkspaceCreationProperties", err.(aws.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyWorkspaceCreationPropertiesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyWorkspaceCreationPropertiesOutput) String() string { + return awsutil.Prettify(s) +} + +const opModifyWorkspaceCreationProperties = "ModifyWorkspaceCreationProperties" + +// ModifyWorkspaceCreationPropertiesRequest returns a request value for making API operation for +// Amazon WorkSpaces. +// +// Modify the default properties used to create WorkSpaces. +// +// // Example sending a request using ModifyWorkspaceCreationPropertiesRequest. 
+// req := client.ModifyWorkspaceCreationPropertiesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceCreationProperties +func (c *Client) ModifyWorkspaceCreationPropertiesRequest(input *ModifyWorkspaceCreationPropertiesInput) ModifyWorkspaceCreationPropertiesRequest { + op := &aws.Operation{ + Name: opModifyWorkspaceCreationProperties, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyWorkspaceCreationPropertiesInput{} + } + + req := c.newRequest(op, input, &ModifyWorkspaceCreationPropertiesOutput{}) + return ModifyWorkspaceCreationPropertiesRequest{Request: req, Input: input, Copy: c.ModifyWorkspaceCreationPropertiesRequest} +} + +// ModifyWorkspaceCreationPropertiesRequest is the request type for the +// ModifyWorkspaceCreationProperties API operation. +type ModifyWorkspaceCreationPropertiesRequest struct { + *aws.Request + Input *ModifyWorkspaceCreationPropertiesInput + Copy func(*ModifyWorkspaceCreationPropertiesInput) ModifyWorkspaceCreationPropertiesRequest +} + +// Send marshals and sends the ModifyWorkspaceCreationProperties API request. +func (r ModifyWorkspaceCreationPropertiesRequest) Send(ctx context.Context) (*ModifyWorkspaceCreationPropertiesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ModifyWorkspaceCreationPropertiesResponse{ + ModifyWorkspaceCreationPropertiesOutput: r.Request.Data.(*ModifyWorkspaceCreationPropertiesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ModifyWorkspaceCreationPropertiesResponse is the response type for the +// ModifyWorkspaceCreationProperties API operation. +type ModifyWorkspaceCreationPropertiesResponse struct { + *ModifyWorkspaceCreationPropertiesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ModifyWorkspaceCreationProperties request. +func (r *ModifyWorkspaceCreationPropertiesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/workspaces/api_op_RegisterWorkspaceDirectory.go b/service/workspaces/api_op_RegisterWorkspaceDirectory.go new file mode 100644 index 00000000000..471562ba70a --- /dev/null +++ b/service/workspaces/api_op_RegisterWorkspaceDirectory.go @@ -0,0 +1,167 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package workspaces + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +type RegisterWorkspaceDirectoryInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. You cannot register a directory if it does + // not have a status of Active. If the directory does not have a status of Active, + // you will receive an InvalidResourceStateException error. If you have already + // registered the maximum number of directories that you can register with Amazon + // WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister + // directories that you are not using for WorkSpaces, and try again. + // + // DirectoryId is a required field + DirectoryId *string `min:"10" type:"string" required:"true"` + + // Indicates whether self-service capabilities are enabled or disabled. + EnableSelfService *bool `type:"boolean"` + + // Indicates whether Amazon WorkDocs is enabled or disabled. 
If you have enabled + // this parameter and WorkDocs is not available in the Region, you will receive + // an OperationNotSupportedException error. Set EnableWorkDocs to disabled, + // and try again. + // + // EnableWorkDocs is a required field + EnableWorkDocs *bool `type:"boolean" required:"true"` + + // The identifiers of the subnets for your virtual private cloud (VPC). Make + // sure that the subnets are in supported Availability Zones. The subnets must + // also be in separate Availability Zones. If these conditions are not met, + // you will receive an OperationNotSupportedException error. + SubnetIds []string `type:"list"` + + // The tags associated with the directory. + Tags []Tag `type:"list"` + + // Indicates whether your WorkSpace directory is dedicated or shared. To use + // Bring Your Own License (BYOL) images, this value must be set to DEDICATED + // and your AWS account must be enabled for BYOL. If your account has not been + // enabled for BYOL, you will receive an InvalidParameterValuesException error. + // For more information about BYOL images, see Bring Your Own Windows Desktop + // Images (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). + Tenancy Tenancy `type:"string" enum:"true"` +} + +// String returns the string representation +func (s RegisterWorkspaceDirectoryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterWorkspaceDirectoryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RegisterWorkspaceDirectoryInput"} + + if s.DirectoryId == nil { + invalidParams.Add(aws.NewErrParamRequired("DirectoryId")) + } + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } + + if s.EnableWorkDocs == nil { + invalidParams.Add(aws.NewErrParamRequired("EnableWorkDocs")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterWorkspaceDirectoryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterWorkspaceDirectoryOutput) String() string { + return awsutil.Prettify(s) +} + +const opRegisterWorkspaceDirectory = "RegisterWorkspaceDirectory" + +// RegisterWorkspaceDirectoryRequest returns a request value for making API operation for +// Amazon WorkSpaces. +// +// Registers the specified directory. This operation is asynchronous and returns +// before the WorkSpace directory is registered. If this is the first time you +// are registering a directory, you will need to create the workspaces_DefaultRole +// role before you can register a directory. For more information, see Creating +// the workspaces_DefaultRole Role (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role). +// +// // Example sending a request using RegisterWorkspaceDirectoryRequest. 
+// req := client.RegisterWorkspaceDirectoryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/RegisterWorkspaceDirectory +func (c *Client) RegisterWorkspaceDirectoryRequest(input *RegisterWorkspaceDirectoryInput) RegisterWorkspaceDirectoryRequest { + op := &aws.Operation{ + Name: opRegisterWorkspaceDirectory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterWorkspaceDirectoryInput{} + } + + req := c.newRequest(op, input, &RegisterWorkspaceDirectoryOutput{}) + return RegisterWorkspaceDirectoryRequest{Request: req, Input: input, Copy: c.RegisterWorkspaceDirectoryRequest} +} + +// RegisterWorkspaceDirectoryRequest is the request type for the +// RegisterWorkspaceDirectory API operation. +type RegisterWorkspaceDirectoryRequest struct { + *aws.Request + Input *RegisterWorkspaceDirectoryInput + Copy func(*RegisterWorkspaceDirectoryInput) RegisterWorkspaceDirectoryRequest +} + +// Send marshals and sends the RegisterWorkspaceDirectory API request. +func (r RegisterWorkspaceDirectoryRequest) Send(ctx context.Context) (*RegisterWorkspaceDirectoryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RegisterWorkspaceDirectoryResponse{ + RegisterWorkspaceDirectoryOutput: r.Request.Data.(*RegisterWorkspaceDirectoryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RegisterWorkspaceDirectoryResponse is the response type for the +// RegisterWorkspaceDirectory API operation. +type RegisterWorkspaceDirectoryResponse struct { + *RegisterWorkspaceDirectoryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RegisterWorkspaceDirectory request. +func (r *RegisterWorkspaceDirectoryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/service/workspaces/api_types.go b/service/workspaces/api_types.go index 4871e7765fa..563b28d5958 100644 --- a/service/workspaces/api_types.go +++ b/service/workspaces/api_types.go @@ -13,7 +13,7 @@ import ( var _ aws.Config var _ = awsutil.Prettify -// Describes a modification to the configuration of bring your own license (BYOL) +// Describes a modification to the configuration of Bring Your Own License (BYOL) // for the specified account. type AccountModification struct { _ struct{} `type:"structure"` @@ -57,13 +57,14 @@ func (s ComputeType) String() string { return awsutil.Prettify(s) } -// Describes the default values used to create a WorkSpace. +// Describes the default values that are used to create WorkSpaces. For more +// information, see Update Directory Details for Your WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html). type DefaultWorkspaceCreationProperties struct { _ struct{} `type:"structure"` // The identifier of any security groups to apply to WorkSpaces when they are // created. - CustomSecurityGroupId *string `type:"string"` + CustomSecurityGroupId *string `min:"11" type:"string"` // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` @@ -80,10 +81,14 @@ type DefaultWorkspaceCreationProperties struct { // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). 
EnableInternetAccess *bool `type:"boolean"` + // Specifies whether maintenance mode is enabled for WorkSpaces. For more information, + // see WorkSpace Maintenance (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). + EnableMaintenanceMode *bool `type:"boolean"` + // Specifies whether the directory is enabled for Amazon WorkDocs. EnableWorkDocs *bool `type:"boolean"` - // Specifies whether the WorkSpace user is an administrator on the WorkSpace. + // Specifies whether WorkSpace users are local administrators on their WorkSpaces. UserEnabledAsLocalAdministrator *bool `type:"boolean"` } @@ -281,6 +286,35 @@ func (s RootStorage) String() string { return awsutil.Prettify(s) } +// Describes the self-service permissions for a directory. For more information, +// see Enable Self-Service WorkSpace Management Capabilities for Your Users +// (https://docs.aws.amazon.com/workspaces/latest/adminguide/enable-user-self-service-workspace-management.html). +type SelfservicePermissions struct { + _ struct{} `type:"structure"` + + // Specifies whether users can change the compute type (bundle) for their WorkSpace. + ChangeComputeType ReconnectEnum `type:"string" enum:"true"` + + // Specifies whether users can increase the volume size of the drives on their + // WorkSpace. + IncreaseVolumeSize ReconnectEnum `type:"string" enum:"true"` + + // Specifies whether users can rebuild the operating system of a WorkSpace to + // its original state. + RebuildWorkspace ReconnectEnum `type:"string" enum:"true"` + + // Specifies whether users can restart their WorkSpace. + RestartWorkspace ReconnectEnum `type:"string" enum:"true"` + + // Specifies whether users can switch the running mode of their WorkSpace. + SwitchRunningMode ReconnectEnum `type:"string" enum:"true"` +} + +// String returns the string representation +func (s SelfservicePermissions) String() string { + return awsutil.Prettify(s) +} + // Describes a snapshot. type Snapshot struct { _ struct{} `type:"structure"` @@ -408,7 +442,7 @@ type Workspace struct { ComputerName *string `type:"string"` // The identifier of the AWS Directory Service directory for the WorkSpace. - DirectoryId *string `type:"string"` + DirectoryId *string `min:"10" type:"string"` // The error code that is returned if the WorkSpace cannot be created. ErrorCode *string `type:"string"` @@ -430,7 +464,7 @@ type Workspace struct { State WorkspaceState `type:"string" enum:"true"` // The identifier of the subnet for the WorkSpace. - SubnetId *string `type:"string"` + SubnetId *string `min:"15" type:"string"` // The user for the WorkSpace. UserName *string `min:"1" type:"string"` @@ -453,6 +487,44 @@ func (s Workspace) String() string { return awsutil.Prettify(s) } +// The device types and operating systems that can be used to access a WorkSpace. +// For more information, see Amazon WorkSpaces Client Network Requirements (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-network-requirements.html). +type WorkspaceAccessProperties struct { + _ struct{} `type:"structure"` + + // Indicates whether users can use Android devices to access their WorkSpaces. + DeviceTypeAndroid AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can use Chromebooks to access their WorkSpaces. + DeviceTypeChromeOs AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can use iOS devices to access their WorkSpaces. 
+ DeviceTypeIos AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can use macOS clients to access their WorkSpaces. + // To restrict WorkSpaces access to trusted devices (also known as managed devices) + // with valid certificates, specify a value of TRUST. For more information, + // see Restrict WorkSpaces Access to Trusted Devices (https://docs.aws.amazon.com/workspaces/latest/adminguide/trusted-devices.html). + DeviceTypeOsx AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can access their WorkSpaces through a web browser. + DeviceTypeWeb AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can use Windows clients to access their WorkSpaces. + // To restrict WorkSpaces access to trusted devices (also known as managed devices) + // with valid certificates, specify a value of TRUST. For more information, + // see Restrict WorkSpaces Access to Trusted Devices (https://docs.aws.amazon.com/workspaces/latest/adminguide/trusted-devices.html). + DeviceTypeWindows AccessPropertyValue `type:"string" enum:"true"` + + // Indicates whether users can use zero client devices to access their WorkSpaces. + DeviceTypeZeroClient AccessPropertyValue `type:"string" enum:"true"` +} + +// String returns the string representation +func (s WorkspaceAccessProperties) String() string { + return awsutil.Prettify(s) +} + // Describes a WorkSpace bundle. type WorkspaceBundle struct { _ struct{} `type:"structure"` @@ -508,7 +580,47 @@ func (s WorkspaceConnectionStatus) String() string { return awsutil.Prettify(s) } -// Describes an AWS Directory Service directory that is used with Amazon WorkSpaces. +// Describes the default properties that are used for creating WorkSpaces. For +// more information, see Update Directory Details for Your WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/update-directory-details.html). +type WorkspaceCreationProperties struct { + _ struct{} `type:"structure"` + + // The identifier of your custom security group. + CustomSecurityGroupId *string `min:"11" type:"string"` + + // The default organizational unit (OU) for your WorkSpace directories. + DefaultOu *string `type:"string"` + + // Indicates whether internet access is enabled for your WorkSpaces. + EnableInternetAccess *bool `type:"boolean"` + + // Indicates whether maintenance mode is enabled for your WorkSpaces. For more + // information, see WorkSpace Maintenance (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). + EnableMaintenanceMode *bool `type:"boolean"` + + // Indicates whether users are local administrators of their WorkSpaces. + UserEnabledAsLocalAdministrator *bool `type:"boolean"` +} + +// String returns the string representation +func (s WorkspaceCreationProperties) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WorkspaceCreationProperties) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "WorkspaceCreationProperties"} + if s.CustomSecurityGroupId != nil && len(*s.CustomSecurityGroupId) < 11 { + invalidParams.Add(aws.NewErrParamMinLen("CustomSecurityGroupId", 11)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Describes a directory that is used with Amazon WorkSpaces. 
type WorkspaceDirectory struct { _ struct{} `type:"structure"` @@ -519,7 +631,7 @@ type WorkspaceDirectory struct { CustomerUserName *string `min:"1" type:"string"` // The directory identifier. - DirectoryId *string `type:"string"` + DirectoryId *string `min:"10" type:"string"` // The name of the directory. DirectoryName *string `type:"string"` @@ -541,17 +653,28 @@ type WorkspaceDirectory struct { // in their Amazon WorkSpaces client application to connect to the directory. RegistrationCode *string `min:"1" type:"string"` - // The state of the directory's registration with Amazon WorkSpaces + // The default self-service permissions for WorkSpaces in the directory. + SelfservicePermissions *SelfservicePermissions `type:"structure"` + + // The state of the directory's registration with Amazon WorkSpaces. State WorkspaceDirectoryState `type:"string" enum:"true"` // The identifiers of the subnets used with the directory. SubnetIds []string `type:"list"` + // Specifies whether the directory is dedicated or shared. To use Bring Your + // Own License (BYOL), this value must be set to DEDICATED. For more information, + // see Bring Your Own Windows Desktop Images (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). + Tenancy Tenancy `type:"string" enum:"true"` + + // The devices and operating systems that users can use to access Workspaces. + WorkspaceAccessProperties *WorkspaceAccessProperties `type:"structure"` + // The default creation properties for all WorkSpaces in the directory. WorkspaceCreationProperties *DefaultWorkspaceCreationProperties `type:"structure"` // The identifier of the security group that is assigned to new WorkSpaces. - WorkspaceSecurityGroupId *string `type:"string"` + WorkspaceSecurityGroupId *string `min:"11" type:"string"` } // String returns the string representation @@ -581,8 +704,9 @@ type WorkspaceImage struct { // The operating system that the image is running. OperatingSystem *OperatingSystem `type:"structure"` - // Specifies whether the image is running on dedicated hardware. When bring - // your own license (BYOL) is enabled, this value is set to DEDICATED. + // Specifies whether the image is running on dedicated hardware. When Bring + // Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more + // information, see Bring Your Own Windows Desktop Images (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). RequiredTenancy WorkspaceImageRequiredTenancy `type:"string" enum:"true"` // The status of the image. @@ -635,7 +759,7 @@ type WorkspaceRequest struct { // You can use DescribeWorkspaceDirectories to list the available directories. // // DirectoryId is a required field - DirectoryId *string `type:"string" required:"true"` + DirectoryId *string `min:"10" type:"string" required:"true"` // Indicates whether the data stored on the root volume is encrypted. RootVolumeEncryptionEnabled *bool `type:"boolean"` @@ -643,8 +767,8 @@ type WorkspaceRequest struct { // The tags for the WorkSpace. Tags []Tag `type:"list"` - // The username of the user for the WorkSpace. This username must exist in the - // AWS Directory Service directory for the WorkSpace. + // The user name of the user for the WorkSpace. This user name must exist in + // the AWS Directory Service directory for the WorkSpace. 
// // UserName is a required field UserName *string `min:"1" type:"string" required:"true"` @@ -675,6 +799,9 @@ func (s *WorkspaceRequest) Validate() error { if s.DirectoryId == nil { invalidParams.Add(aws.NewErrParamRequired("DirectoryId")) } + if s.DirectoryId != nil && len(*s.DirectoryId) < 10 { + invalidParams.Add(aws.NewErrParamMinLen("DirectoryId", 10)) + } if s.UserName == nil { invalidParams.Add(aws.NewErrParamRequired("UserName")) diff --git a/service/workspaces/workspacesiface/interface.go b/service/workspaces/workspacesiface/interface.go index 1612e066410..146b0dac5b7 100644 --- a/service/workspaces/workspacesiface/interface.go +++ b/service/workspaces/workspacesiface/interface.go @@ -79,6 +79,8 @@ type ClientAPI interface { DeleteWorkspaceImageRequest(*workspaces.DeleteWorkspaceImageInput) workspaces.DeleteWorkspaceImageRequest + DeregisterWorkspaceDirectoryRequest(*workspaces.DeregisterWorkspaceDirectoryInput) workspaces.DeregisterWorkspaceDirectoryRequest + DescribeAccountRequest(*workspaces.DescribeAccountInput) workspaces.DescribeAccountRequest DescribeAccountModificationsRequest(*workspaces.DescribeAccountModificationsInput) workspaces.DescribeAccountModificationsRequest @@ -111,6 +113,12 @@ type ClientAPI interface { ModifyClientPropertiesRequest(*workspaces.ModifyClientPropertiesInput) workspaces.ModifyClientPropertiesRequest + ModifySelfservicePermissionsRequest(*workspaces.ModifySelfservicePermissionsInput) workspaces.ModifySelfservicePermissionsRequest + + ModifyWorkspaceAccessPropertiesRequest(*workspaces.ModifyWorkspaceAccessPropertiesInput) workspaces.ModifyWorkspaceAccessPropertiesRequest + + ModifyWorkspaceCreationPropertiesRequest(*workspaces.ModifyWorkspaceCreationPropertiesInput) workspaces.ModifyWorkspaceCreationPropertiesRequest + ModifyWorkspacePropertiesRequest(*workspaces.ModifyWorkspacePropertiesInput) workspaces.ModifyWorkspacePropertiesRequest ModifyWorkspaceStateRequest(*workspaces.ModifyWorkspaceStateInput) workspaces.ModifyWorkspaceStateRequest @@ -119,6 +127,8 @@ type ClientAPI interface { RebuildWorkspacesRequest(*workspaces.RebuildWorkspacesInput) workspaces.RebuildWorkspacesRequest + RegisterWorkspaceDirectoryRequest(*workspaces.RegisterWorkspaceDirectoryInput) workspaces.RegisterWorkspaceDirectoryRequest + RestoreWorkspaceRequest(*workspaces.RestoreWorkspaceInput) workspaces.RestoreWorkspaceRequest RevokeIpRulesRequest(*workspaces.RevokeIpRulesInput) workspaces.RevokeIpRulesRequest
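Taken together, the WorkSpaces changes above add directory registration and deregistration, self-service permissions, and per-device access controls. The sketch below shows how these new operations might be chained with this release; the directory identifier is a placeholder, external.LoadDefaultAWSConfig is assumed for configuration, and the pre-existing ReconnectEnum constants are assumed to carry the package's usual ENABLED/DISABLED values.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/external"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
)

func main() {
	cfg, err := external.LoadDefaultAWSConfig()
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}
	svc := workspaces.New(cfg)
	directoryID := "d-906734325d" // placeholder directory identifier

	// Register the directory with shared tenancy; EnableWorkDocs is required.
	regReq := svc.RegisterWorkspaceDirectoryRequest(&workspaces.RegisterWorkspaceDirectoryInput{
		DirectoryId:       aws.String(directoryID),
		EnableWorkDocs:    aws.Bool(false),
		EnableSelfService: aws.Bool(true),
		Tenancy:           workspaces.TenancyShared,
	})
	if _, err := regReq.Send(context.TODO()); err != nil {
		log.Fatalf("RegisterWorkspaceDirectory failed, %v", err)
	}

	// Let users restart and rebuild their own WorkSpaces, but nothing else.
	// ReconnectEnumEnabled/Disabled are the package's existing values (assumed).
	permReq := svc.ModifySelfservicePermissionsRequest(&workspaces.ModifySelfservicePermissionsInput{
		ResourceId: aws.String(directoryID),
		SelfservicePermissions: &workspaces.SelfservicePermissions{
			RestartWorkspace:   workspaces.ReconnectEnumEnabled,
			RebuildWorkspace:   workspaces.ReconnectEnumEnabled,
			ChangeComputeType:  workspaces.ReconnectEnumDisabled,
			IncreaseVolumeSize: workspaces.ReconnectEnumDisabled,
			SwitchRunningMode:  workspaces.ReconnectEnumDisabled,
		},
	})
	if _, err := permReq.Send(context.TODO()); err != nil {
		log.Fatalf("ModifySelfservicePermissions failed, %v", err)
	}

	// Allow the Windows client but deny web browser access.
	accessReq := svc.ModifyWorkspaceAccessPropertiesRequest(&workspaces.ModifyWorkspaceAccessPropertiesInput{
		ResourceId: aws.String(directoryID),
		WorkspaceAccessProperties: &workspaces.WorkspaceAccessProperties{
			DeviceTypeWindows: workspaces.AccessPropertyValueAllow,
			DeviceTypeWeb:     workspaces.AccessPropertyValueDeny,
		},
	})
	if _, err := accessReq.Send(context.TODO()); err != nil {
		log.Fatalf("ModifyWorkspaceAccessProperties failed, %v", err)
	}
}
```

RegisterWorkspaceDirectory is asynchronous, so production code would poll DescribeWorkspaceDirectories (which now also accepts a Limit parameter) until the directory reports a registered state before modifying its self-service or access properties.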