diff --git a/CHANGELOG.md b/CHANGELOG.md index c1468139a2..ecf3441ffb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,23 @@ +Release v1.40.5 (2021-07-21) +=== + +### Service Client Updates +* `service/codebuild`: Updates service API and documentation + * AWS CodeBuild now allows you to set the access permissions for build artifacts, project artifacts, and log files that are uploaded to an Amazon S3 bucket that is owned by another account. +* `service/elasticloadbalancingv2`: Updates service documentation +* `service/elasticmapreduce`: Updates service API, documentation, and paginators + * EMR now supports new DescribeReleaseLabel and ListReleaseLabel APIs. They can provide Amazon EMR release label details. You can programmatically list available releases and applications for a specific Amazon EMR release label. +* `service/iam`: Updates service documentation + * Documentation updates for AWS Identity and Access Management (IAM). +* `service/kendra`: Updates service API and documentation + * Amazon Kendra now provides a data source connector for Amazon WorkDocs. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-workdocs.html +* `service/lambda`: Updates service API and documentation + * New ResourceConflictException error code for PutFunctionEventInvokeConfig, UpdateFunctionEventInvokeConfig, and DeleteFunctionEventInvokeConfig operations. +* `service/personalize`: Updates service API and documentation +* `service/proton`: Updates service documentation +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Adds the OriginalSnapshotCreateTime field to the DBSnapshot response object. This field timestamps the underlying data of a snapshot and doesn't change when the snapshot is copied. 
+ Release v1.40.4 (2021-07-20) === diff --git a/aws/version.go b/aws/version.go index 36faa11913..9a521b2a30 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.40.4" +const SDKVersion = "1.40.5" diff --git a/models/apis/codebuild/2016-10-06/api-2.json b/models/apis/codebuild/2016-10-06/api-2.json index 2d6916d457..343ea5861e 100644 --- a/models/apis/codebuild/2016-10-06/api-2.json +++ b/models/apis/codebuild/2016-10-06/api-2.json @@ -699,6 +699,14 @@ } }, "Boolean":{"type":"boolean"}, + "BucketOwnerAccess":{ + "type":"string", + "enum":[ + "NONE", + "READ_ONLY", + "FULL" + ] + }, "Build":{ "type":"structure", "members":{ @@ -744,7 +752,8 @@ "md5sum":{"shape":"String"}, "overrideArtifactName":{"shape":"WrapperBoolean"}, "encryptionDisabled":{"shape":"WrapperBoolean"}, - "artifactIdentifier":{"shape":"String"} + "artifactIdentifier":{"shape":"String"}, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} } }, "BuildArtifactsList":{ @@ -1721,7 +1730,8 @@ "packaging":{"shape":"ArtifactPackaging"}, "overrideArtifactName":{"shape":"WrapperBoolean"}, "encryptionDisabled":{"shape":"WrapperBoolean"}, - "artifactIdentifier":{"shape":"String"} + "artifactIdentifier":{"shape":"String"}, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} } }, "ProjectArtifactsList":{ @@ -2114,7 +2124,8 @@ "members":{ "status":{"shape":"LogsConfigStatusType"}, "location":{"shape":"String"}, - "encryptionDisabled":{"shape":"WrapperBoolean"} + "encryptionDisabled":{"shape":"WrapperBoolean"}, + "bucketOwnerAccess":{"shape":"BucketOwnerAccess"} } }, "S3ReportExportConfig":{ diff --git a/models/apis/codebuild/2016-10-06/docs-2.json b/models/apis/codebuild/2016-10-06/docs-2.json index ca98b58111..7d467d4dce 100644 --- a/models/apis/codebuild/2016-10-06/docs-2.json +++ b/models/apis/codebuild/2016-10-06/docs-2.json @@ -1,6 +1,6 @@ { "version": "2.0", - "service": "

CodeBuild is a fully managed build service in the cloud. CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in CodeBuild to use your own build tools. CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about CodeBuild, see the CodeBuild User Guide.

", + "service": "AWS CodeBuild

AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild compiles your source code, runs unit tests, and produces artifacts that are ready to deploy. AWS CodeBuild eliminates the need to provision, manage, and scale your own build servers. It provides prepackaged build environments for the most popular programming languages and build tools, such as Apache Maven, Gradle, and more. You can also fully customize build environments in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically to meet peak build requests. You pay only for the build time you consume. For more information about AWS CodeBuild, see the AWS CodeBuild User Guide.

", "operations": { "BatchDeleteBuilds": "

Deletes one or more builds.

", "BatchGetBuildBatches": "

Retrieves information about one or more batch builds.

", @@ -10,31 +10,31 @@ "BatchGetReports": "

Returns an array of reports.

", "CreateProject": "

Creates a build project.

", "CreateReportGroup": "

Creates a report group. A report group contains a collection of reports.

", - "CreateWebhook": "

For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an CodeBuild project, and the project is used as a build step in CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using CodePipeline, we recommend that you disable webhooks in CodeBuild. In the CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

", + "CreateWebhook": "

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds are created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you are billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 5 in Change a Build Project's Settings.

", "DeleteBuildBatch": "

Deletes a batch build.

", "DeleteProject": "

Deletes a build project. When you delete a project, its builds are not deleted.

", "DeleteReport": "

Deletes a report.

", "DeleteReportGroup": "

Deletes a report group. Before you delete a report group, you must delete its reports.

", "DeleteResourcePolicy": "

Deletes a resource policy that is identified by its resource ARN.

", "DeleteSourceCredentials": "

Deletes a set of GitHub, GitHub Enterprise, or Bitbucket source credentials.

", - "DeleteWebhook": "

For an existing CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

", + "DeleteWebhook": "

For an existing AWS CodeBuild build project that has its source code stored in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code every time a code change is pushed to the repository.

", "DescribeCodeCoverages": "

Retrieves one or more code coverage reports.

", "DescribeTestCases": "

Returns a list of details about test cases for a report.

", "GetReportGroupTrend": "

Analyzes and accumulates test report values for the specified test reports.

", "GetResourcePolicy": "

Gets a resource policy that is identified by its resource ARN.

", - "ImportSourceCredentials": "

Imports the source repository credentials for an CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

", + "ImportSourceCredentials": "

Imports the source repository credentials for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.

", "InvalidateProjectCache": "

Resets the cache for a project.

", "ListBuildBatches": "

Retrieves the identifiers of your build batches in the current region.

", "ListBuildBatchesForProject": "

Retrieves the identifiers of the build batches for a specific project.

", "ListBuilds": "

Gets a list of build IDs, with each build ID representing a single build.

", "ListBuildsForProject": "

Gets a list of build identifiers for the specified build project, with each build identifier representing a single build.

", - "ListCuratedEnvironmentImages": "

Gets information about Docker images that are managed by CodeBuild.

", + "ListCuratedEnvironmentImages": "

Gets information about Docker images that are managed by AWS CodeBuild.

", "ListProjects": "

Gets a list of build project names, with each build project name representing a single build project.

", - "ListReportGroups": "

Gets a list ARNs for the report groups in the current Amazon Web Services account.

", - "ListReports": "

Returns a list of ARNs for the reports in the current Amazon Web Services account.

", + "ListReportGroups": "

Gets a list of ARNs for the report groups in the current AWS account.

", + "ListReports": "

Returns a list of ARNs for the reports in the current AWS account.

", "ListReportsForReportGroup": "

Returns a list of ARNs for the reports that belong to a ReportGroup.

", - "ListSharedProjects": "

Gets a list of projects that are shared with other Amazon Web Services accounts or users.

", - "ListSharedReportGroups": "

Gets a list of report groups that are shared with other Amazon Web Services accounts or users.

", + "ListSharedProjects": "

Gets a list of projects that are shared with other AWS accounts or users.

", + "ListSharedReportGroups": "

Gets a list of report groups that are shared with other AWS accounts or users.

", "ListSourceCredentials": "

Returns a list of SourceCredentialsInfo objects.

", "PutResourcePolicy": "

Stores a resource policy for the ARN of a Project or ReportGroup object.

", "RetryBuild": "

Restarts a build.

", @@ -45,37 +45,37 @@ "StopBuildBatch": "

Stops a running batch build.

", "UpdateProject": "

Changes the settings of a build project.

", "UpdateReportGroup": "

Updates a report group.

", - "UpdateWebhook": "

Updates the webhook associated with an CodeBuild build project.

If you use Bitbucket for your repository, rotateSecret is ignored.

" + "UpdateWebhook": "

Updates the webhook associated with an AWS CodeBuild build project.

If you use Bitbucket for your repository, rotateSecret is ignored.

" }, "shapes": { "AccountLimitExceededException": { - "base": "

An Amazon Web Services service limit was exceeded for the calling Amazon Web Services account.

", + "base": "

An AWS service limit was exceeded for the calling AWS account.

", "refs": { } }, "ArtifactNamespace": { "base": null, "refs": { - "ProjectArtifacts$namespaceType": "

Along with path and name, the pattern that CodeBuild uses to determine the name and location to store the output artifact:

For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

" + "ProjectArtifacts$namespaceType": "

Along with path and name, the pattern that AWS CodeBuild uses to determine the name and location to store the output artifact:

For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.

" } }, "ArtifactPackaging": { "base": null, "refs": { - "ProjectArtifacts$packaging": "

The type of build output artifact to create:

" + "ProjectArtifacts$packaging": "

The type of build output artifact to create:

" } }, "ArtifactsType": { "base": null, "refs": { - "ProjectArtifacts$type": "

The type of build output artifact. Valid values include:

", + "ProjectArtifacts$type": "

The type of build output artifact. Valid values include:

", "ResolvedArtifact$type": "

Specifies the type of artifact.

" } }, "AuthType": { "base": null, "refs": { - "ImportSourceCredentialsInput$authType": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the CodeBuild console.

", + "ImportSourceCredentialsInput$authType": "

The type of authentication used to connect to a GitHub, GitHub Enterprise, or Bitbucket repository. An OAUTH connection is not supported by the API and must be created using the AWS CodeBuild console.

", "SourceCredentialsInfo$authType": "

The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, or PERSONAL_ACCESS_TOKEN.

" } }, @@ -156,6 +156,14 @@ "UpdateWebhookInput$rotateSecret": "

A boolean value that specifies whether the associated GitHub repository's secret token should be updated. If you use Bitbucket for your repository, rotateSecret is ignored.

" } }, + "BucketOwnerAccess": { + "base": "

Specifies the access for objects that are uploaded to an Amazon S3 bucket that is owned by another account.

By default, only the account that uploads the objects to the bucket has access to these objects. This property allows you to give the bucket owner access to these objects.

NONE

The bucket owner does not have access to the objects. This is the default.

READ_ONLY

The bucket owner has read only access to the objects. The uploading account retains ownership of the objects.

FULL

The bucket owner has full access to the objects. Object ownership is determined by the following criteria:

For more information about Amazon S3 object ownership, see Controlling ownership of uploaded objects using S3 Object Ownership in the Amazon Simple Storage Service User Guide.

", + "refs": { + "BuildArtifacts$bucketOwnerAccess": null, + "ProjectArtifacts$bucketOwnerAccess": null, + "S3LogsConfig$bucketOwnerAccess": null + } + }, "Build": { "base": "

Information about a build.

", "refs": { @@ -284,7 +292,7 @@ } }, "BuildStatusConfig": { - "base": "

Contains information that defines how the CodeBuild build project reports the build status to the source provider.

", + "base": "

Contains information that defines how the AWS CodeBuild build project reports the build status to the source provider.

", "refs": { "ProjectSource$buildStatusConfig": "

Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.

", "StartBuildInput$buildStatusConfigOverride": "

Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET.

" @@ -329,10 +337,10 @@ } }, "CloudWatchLogsConfig": { - "base": "

Information about CloudWatch Logs for a build project.

", + "base": "

Information about Amazon CloudWatch Logs for a build project.

", "refs": { - "LogsConfig$cloudWatchLogs": "

Information about CloudWatch Logs for a build project. CloudWatch Logs are enabled by default.

", - "LogsLocation$cloudWatchLogs": "

Information about CloudWatch Logs for a build project.

" + "LogsConfig$cloudWatchLogs": "

Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch Logs are enabled by default.

", + "LogsLocation$cloudWatchLogs": "

Information about Amazon CloudWatch Logs for a build project.

" } }, "CodeCoverage": { @@ -356,7 +364,7 @@ "ComputeType": { "base": null, "refs": { - "ProjectEnvironment$computeType": "

Information about the compute resources the build project uses. Available values include:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build Environment Compute Types in the CodeBuild User Guide.

", + "ProjectEnvironment$computeType": "

Information about the compute resources the build project uses. Available values include:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build Environment Compute Types in the AWS CodeBuild User Guide.

", "StartBuildBatchInput$computeTypeOverride": "

The name of a compute type for this batch build that overrides the one specified in the batch build project.

", "StartBuildInput$computeTypeOverride": "

The name of a compute type for this build that overrides the one specified in the build project.

" } @@ -364,7 +372,7 @@ "ComputeTypesAllowed": { "base": null, "refs": { - "BatchRestrictions$computeTypesAllowed": "

An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the CodeBuild User Guide for these values.

" + "BatchRestrictions$computeTypesAllowed": "

An array of strings that specify the compute types that are allowed for the batch build. See Build environment compute types in the AWS CodeBuild User Guide for these values.

" } }, "CreateProjectInput": { @@ -400,7 +408,7 @@ "CredentialProviderType": { "base": null, "refs": { - "RegistryCredential$credentialProvider": "

The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for Secrets Manager.

" + "RegistryCredential$credentialProvider": "

The service that created the credentials to access a private Docker registry. The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.

" } }, "DebugSession": { @@ -500,7 +508,7 @@ } }, "EnvironmentImage": { - "base": "

Information about a Docker image that is managed by CodeBuild.

", + "base": "

Information about a Docker image that is managed by AWS CodeBuild.

", "refs": { "EnvironmentImages$member": null } @@ -512,7 +520,7 @@ } }, "EnvironmentLanguage": { - "base": "

A set of Docker images that are related by programming language and are managed by CodeBuild.

", + "base": "

A set of Docker images that are related by programming language and are managed by AWS CodeBuild.

", "refs": { "EnvironmentLanguages$member": null } @@ -524,7 +532,7 @@ } }, "EnvironmentPlatform": { - "base": "

A set of Docker images that are related by platform and are managed by CodeBuild.

", + "base": "

A set of Docker images that are related by platform and are managed by AWS CodeBuild.

", "refs": { "EnvironmentPlatforms$member": null } @@ -532,13 +540,13 @@ "EnvironmentPlatforms": { "base": null, "refs": { - "ListCuratedEnvironmentImagesOutput$platforms": "

Information about supported platforms for Docker images that are managed by CodeBuild.

" + "ListCuratedEnvironmentImagesOutput$platforms": "

Information about supported platforms for Docker images that are managed by AWS CodeBuild.

" } }, "EnvironmentType": { "base": null, "refs": { - "ProjectEnvironment$type": "

The type of build environment to use for related builds.

For more information, see Build environment compute types in the CodeBuild user guide.

", + "ProjectEnvironment$type": "

The type of build environment to use for related builds.

", "StartBuildBatchInput$environmentTypeOverride": "

A container type for this batch build that overrides the one specified in the batch build project.

", "StartBuildInput$environmentTypeOverride": "

A container type for this build that overrides the one specified in the build project.

" } @@ -552,7 +560,7 @@ "EnvironmentVariableType": { "base": null, "refs": { - "EnvironmentVariable$type": "

The type of environment variable. Valid values include:

" + "EnvironmentVariable$type": "

The type of environment variable. Valid values include:

" } }, "EnvironmentVariables": { @@ -564,7 +572,7 @@ } }, "ExportedEnvironmentVariable": { - "base": "

Contains information about an exported environment variable.

Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.

During a build, the value of a variable is available starting with the install phase. It can be updated between the start of the install phase and the end of the post_build phase. After the post_build phase ends, the value of exported variables cannot change.

", + "base": "

Contains information about an exported environment variable.

Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.

During a build, the value of a variable is available starting with the install phase. It can be updated between the start of the install phase and the end of the post_build phase. After the post_build phase ends, the value of exported variables cannot change.

", "refs": { "ExportedEnvironmentVariables$member": null } @@ -572,7 +580,7 @@ "ExportedEnvironmentVariables": { "base": null, "refs": { - "Build$exportedEnvironmentVariables": "

A list of exported environment variables for this build.

Exported environment variables are used in conjunction with CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the CodePipeline User Guide.

" + "Build$exportedEnvironmentVariables": "

A list of exported environment variables for this build.

Exported environment variables are used in conjunction with AWS CodePipeline to export environment variables from the current build stage to subsequent stages in the pipeline. For more information, see Working with variables in the AWS CodePipeline User Guide.

" } }, "FileSystemType": { @@ -624,11 +632,11 @@ } }, "GitSubmodulesConfig": { - "base": "

Information about the Git submodules configuration for an CodeBuild build project.

", + "base": "

Information about the Git submodules configuration for an AWS CodeBuild build project.

", "refs": { "ProjectSource$gitSubmodulesConfig": "

Information about the Git submodules configuration for the build project.

", "StartBuildBatchInput$gitSubmodulesConfigOverride": "

A GitSubmodulesConfig object that overrides the Git submodules configuration for this batch build.

", - "StartBuildInput$gitSubmodulesConfigOverride": "

Information about the Git submodules configuration for this build of an CodeBuild build project.

" + "StartBuildInput$gitSubmodulesConfigOverride": "

Information about the Git submodules configuration for this build of an AWS CodeBuild build project.

" } }, "Identifiers": { @@ -640,9 +648,9 @@ "ImagePullCredentialsType": { "base": null, "refs": { - "ProjectEnvironment$imagePullCredentialsType": "

The type of credentials CodeBuild uses to pull images in your build. There are two valid values:

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an CodeBuild curated image, you must use CODEBUILD credentials.

", - "StartBuildBatchInput$imagePullCredentialsTypeOverride": "

The type of credentials CodeBuild uses to pull images in your batch build. There are two valid values:

CODEBUILD

Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.

SERVICE_ROLE

Specifies that CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an CodeBuild curated image, you must use CODEBUILD credentials.

", - "StartBuildInput$imagePullCredentialsTypeOverride": "

The type of credentials CodeBuild uses to pull images in your build. There are two valid values:

CODEBUILD

Specifies that CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust CodeBuild's service principal.

SERVICE_ROLE

Specifies that CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an CodeBuild curated image, you must use CODEBUILD credentials.

" + "ProjectEnvironment$imagePullCredentialsType": "

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

When you use a cross-account or private registry image, you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD credentials.

", + "StartBuildBatchInput$imagePullCredentialsTypeOverride": "

The type of credentials AWS CodeBuild uses to pull images in your batch build. There are two valid values:

CODEBUILD

Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.

SERVICE_ROLE

Specifies that AWS CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD credentials.

", + "StartBuildInput$imagePullCredentialsTypeOverride": "

The type of credentials AWS CodeBuild uses to pull images in your build. There are two valid values:

CODEBUILD

Specifies that AWS CodeBuild uses its own credentials. This requires that you modify your ECR repository policy to trust AWS CodeBuild's service principal.

SERVICE_ROLE

Specifies that AWS CodeBuild uses your build project's service role.

When using a cross-account or private registry image, you must use SERVICE_ROLE credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD credentials.

" } }, "ImageVersions": { @@ -809,27 +817,27 @@ } }, "LogsConfig": { - "base": "

Information about logs for a build project. These can be logs in CloudWatch Logs, built in a specified S3 bucket, or both.

", + "base": "

Information about logs for a build project. These can be logs in Amazon CloudWatch Logs, built in a specified S3 bucket, or both.

", "refs": { "BuildBatch$logConfig": null, - "CreateProjectInput$logsConfig": "

Information about logs for the build project. These can be logs in CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.

", - "Project$logsConfig": "

Information about logs for the build project. A project can create logs in CloudWatch Logs, an S3 bucket, or both.

", + "CreateProjectInput$logsConfig": "

Information about logs for the build project. These can be logs in Amazon CloudWatch Logs, logs uploaded to a specified S3 bucket, or both.

", + "Project$logsConfig": "

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, an S3 bucket, or both.

", "StartBuildBatchInput$logsConfigOverride": "

A LogsConfig object that override the log settings defined in the batch build project.

", "StartBuildInput$logsConfigOverride": "

Log settings for this build that override the log settings defined in the build project.

", - "UpdateProjectInput$logsConfig": "

Information about logs for the build project. A project can create logs in CloudWatch Logs, logs in an S3 bucket, or both.

" + "UpdateProjectInput$logsConfig": "

Information about logs for the build project. A project can create logs in Amazon CloudWatch Logs, logs in an S3 bucket, or both.

" } }, "LogsConfigStatusType": { "base": null, "refs": { - "CloudWatchLogsConfig$status": "

The current status of the logs in CloudWatch Logs for a build project. Valid values are:

", + "CloudWatchLogsConfig$status": "

The current status of the logs in Amazon CloudWatch Logs for a build project. Valid values are:

", "S3LogsConfig$status": "

The current status of the S3 build logs. Valid values are:

" } }, "LogsLocation": { - "base": "

Information about build logs in CloudWatch Logs.

", + "base": "

Information about build logs in Amazon CloudWatch Logs.

", "refs": { - "Build$logs": "

Information about the build's logs in CloudWatch Logs.

" + "Build$logs": "

Information about the build's logs in Amazon CloudWatch Logs.

" } }, "NetworkInterface": { @@ -843,18 +851,18 @@ "refs": { "Build$id": "

The unique ID for the build.

", "Build$arn": "

The Amazon Resource Name (ARN) of the build.

", - "Build$sourceVersion": "

Any version identifier for the version of the source code to be built. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", - "Build$resolvedSourceVersion": "

An identifier for the version of this build's source code.

", - "Build$projectName": "

The name of the CodeBuild project.

", + "Build$sourceVersion": "

Any version identifier for the version of the source code to be built. If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", + "Build$resolvedSourceVersion": "

An identifier for the version of this build's source code.

", + "Build$projectName": "

The name of the AWS CodeBuild project.

", "Build$serviceRole": "

The name of a service role used for this build.

", - "Build$encryptionKey": "

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "Build$encryptionKey": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "BuildBatch$id": "

The identifier of the batch build.

", "BuildBatch$arn": "

The ARN of the batch build.

", "BuildBatch$sourceVersion": "

The identifier of the version of the source code to be built.

", - "BuildBatch$resolvedSourceVersion": "

The identifier of the resolved version of this batch build's source code.

", + "BuildBatch$resolvedSourceVersion": "

The identifier of the resolved version of this batch build's source code.

", "BuildBatch$projectName": "

The name of the batch build project.

", "BuildBatch$serviceRole": "

The name of a service role used for builds in the batch.

", - "BuildBatch$encryptionKey": "

The Key Management Service customer master key (CMK) to be used for encrypting the batch build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "BuildBatch$encryptionKey": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the batch build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "BuildBatchIds$member": null, "BuildIds$member": null, "BuildNotDeleted$id": "

The ID of the build that could not be successfully deleted.

", @@ -862,8 +870,8 @@ "CodeCoverage$reportARN": "

The ARN of the report.

", "CodeCoverage$filePath": "

The path of the test report file.

", "ComputeTypesAllowed$member": null, - "CreateProjectInput$serviceRole": "

The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

", - "CreateProjectInput$encryptionKey": "

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "CreateProjectInput$serviceRole": "

The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

", + "CreateProjectInput$encryptionKey": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "DebugSession$sessionTarget": "

Contains the identifier of the Session Manager session used for the build. To work with the paused build, you open this session to examine, control, and resume the build.

", "DeleteBuildBatchInput$id": "

The identifier of the batch build to delete.

", "DeleteProjectInput$name": "

The name of the build project.

", @@ -881,23 +889,23 @@ "Identifiers$member": null, "ImportSourceCredentialsInput$username": "

The Bitbucket username when the authType is BASIC_AUTH. This parameter is not valid for other types of source providers or connections.

", "ImportSourceCredentialsOutput$arn": "

The Amazon Resource Name (ARN) of the token.

", - "InvalidateProjectCacheInput$projectName": "

The name of the CodeBuild build project that the cache is reset for.

", + "InvalidateProjectCacheInput$projectName": "

The name of the AWS CodeBuild build project that the cache is reset for.

", "ListBuildBatchesForProjectInput$projectName": "

The name of the project.

", - "ListBuildsForProjectInput$projectName": "

The name of the CodeBuild project.

", + "ListBuildsForProjectInput$projectName": "

The name of the AWS CodeBuild project.

", "ListProjectsInput$nextToken": "

During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a nextToken. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", "ListSharedProjectsInput$nextToken": "

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", "NetworkInterface$subnetId": "

The ID of the subnet.

", "NetworkInterface$networkInterfaceId": "

The ID of the network interface.

", - "Project$serviceRole": "

The ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

", - "Project$encryptionKey": "

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>). If you don't specify a value, CodeBuild uses the managed CMK for Amazon Simple Storage Service (Amazon S3).

", + "Project$serviceRole": "

The ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

", + "Project$encryptionKey": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "ProjectArns$member": null, "ProjectBuildBatchConfig$serviceRole": "

Specifies the service role ARN for the batch build project.

", - "ProjectEnvironment$image": "

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

For more information, see Docker images provided by CodeBuild in the CodeBuild user guide.

", + "ProjectEnvironment$image": "

The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:

", "ProjectNames$member": null, - "PutResourcePolicyInput$policy": "

A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the CodeBuild User Guide.

", + "PutResourcePolicyInput$policy": "

A JSON-formatted resource policy. For more information, see Sharing a Project and Sharing a Report Group in the AWS CodeBuild User Guide.

", "PutResourcePolicyInput$resourceArn": "

The ARN of the Project or ReportGroup resource you want to associate with a resource policy.

", "PutResourcePolicyOutput$resourceArn": "

The ARN of the Project or ReportGroup resource that is associated with a resource policy.

", - "RegistryCredential$credential": "

The Amazon Resource Name (ARN) or name of credentials created using Secrets Manager.

The credential can use the name of the credentials only if they exist in your current Region.

", + "RegistryCredential$credential": "

The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets Manager.

The credential can use the name of the credentials only if they exist in your current AWS Region.

", "Report$arn": "

The ARN of the report run.

", "Report$reportGroupArn": "

The ARN of the report group associated with this report.

", "ReportArns$member": null, @@ -913,22 +921,22 @@ "StartBuildBatchInput$projectName": "

The name of the project.

", "StartBuildBatchInput$imageOverride": "

The name of an image for this batch build that overrides the one specified in the batch build project.

", "StartBuildBatchInput$serviceRoleOverride": "

The name of a service role for this batch build that overrides the one specified in the batch build project.

", - "StartBuildBatchInput$encryptionKeyOverride": "

The Key Management Service customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", - "StartBuildInput$projectName": "

The name of the CodeBuild build project to start running a build.

", + "StartBuildBatchInput$encryptionKeyOverride": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the batch build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "StartBuildInput$projectName": "

The name of the AWS CodeBuild build project to start running a build.

", "StartBuildInput$imageOverride": "

The name of an image for this build that overrides the one specified in the build project.

", "StartBuildInput$serviceRoleOverride": "

The name of a service role for this build that overrides the one specified in the build project.

", - "StartBuildInput$encryptionKeyOverride": "

The Key Management Service customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "StartBuildInput$encryptionKeyOverride": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides the one specified in the build project. The CMK key encrypts the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "StopBuildBatchInput$id": "

The identifier of the batch build to stop.

", "StopBuildInput$id": "

The ID of the build.

", "Subnets$member": null, "TestCase$reportArn": "

The ARN of the report to which the test case belongs.

", "UpdateProjectInput$name": "

The name of the build project.

You cannot change a build project's name.

", - "UpdateProjectInput$serviceRole": "

The replacement ARN of the Identity and Access Management role that enables CodeBuild to interact with dependent Amazon Web Services services on behalf of the Amazon Web Services account.

", - "UpdateProjectInput$encryptionKey": "

The Key Management Service customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", + "UpdateProjectInput$serviceRole": "

The replacement ARN of the AWS Identity and Access Management (IAM) role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.

", + "UpdateProjectInput$encryptionKey": "

The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.

You can use a cross-account KMS key to encrypt the build output artifacts if your service role has permission to that key.

You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, the CMK's alias (using the format alias/<alias-name>).

", "UpdateReportGroupInput$arn": "

The ARN of the report group to update.

", "VpcConfig$vpcId": "

The ID of the Amazon VPC.

", "Webhook$url": "

The URL to the webhook.

", - "Webhook$payloadUrl": "

The CodeBuild endpoint where webhook events are sent.

", + "Webhook$payloadUrl": "

The AWS CodeBuild endpoint where webhook events are sent.

", "Webhook$secret": "

The secret token of the associated repository.

A Bitbucket webhook does not support secret.

" } }, @@ -1006,7 +1014,7 @@ "ProjectArns": { "base": null, "refs": { - "ListSharedProjectsOutput$projects": "

The list of ARNs for the build projects shared with the current Amazon Web Services account or user.

" + "ListSharedProjectsOutput$projects": "

The list of ARNs for the build projects shared with the current AWS account or user.

" } }, "ProjectArtifacts": { @@ -1102,16 +1110,16 @@ "base": null, "refs": { "CreateProjectInput$name": "

The name of the build project.

", - "CreateWebhookInput$projectName": "

The name of the CodeBuild project.

", - "DeleteWebhookInput$projectName": "

The name of the CodeBuild project.

", + "CreateWebhookInput$projectName": "

The name of the AWS CodeBuild project.

", + "DeleteWebhookInput$projectName": "

The name of the AWS CodeBuild project.

", "Project$name": "

The name of the build project.

", - "UpdateWebhookInput$projectName": "

The name of the CodeBuild project.

" + "UpdateWebhookInput$projectName": "

The name of the AWS CodeBuild project.

" } }, "ProjectNames": { "base": null, "refs": { - "BatchGetProjectsInput$names": "

The names or ARNs of the build projects. To get information about a project shared with your Amazon Web Services account, its ARN must be specified. You cannot specify a shared project using its name.

", + "BatchGetProjectsInput$names": "

The names or ARNs of the build projects. To get information about a project shared with your AWS account, its ARN must be specified. You cannot specify a shared project using its name.

", "BatchGetProjectsOutput$projectsNotFound": "

The names of build projects for which information could not be found.

", "ListProjectsOutput$projects": "

The list of build project names, with each build project name representing a single build project.

" } @@ -1119,8 +1127,8 @@ "ProjectSecondarySourceVersions": { "base": null, "refs": { - "Build$secondarySourceVersions": "

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

", - "BuildBatch$secondarySourceVersions": "

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

", + "Build$secondarySourceVersions": "

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

", + "BuildBatch$secondarySourceVersions": "

An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one of:

", "CreateProjectInput$secondarySourceVersions": "

An array of ProjectSourceVersion objects. If secondarySourceVersions is specified at the build level, then they take precedence over these secondarySourceVersions (at the project level).

", "Project$secondarySourceVersions": "

An array of ProjectSourceVersion objects. If secondarySourceVersions is specified at the build level, then they take precedence over these secondarySourceVersions (at the project level).

", "StartBuildBatchInput$secondarySourcesVersionOverride": "

An array of ProjectSourceVersion objects that override the secondary source versions in the batch build project.

", @@ -1180,7 +1188,7 @@ } }, "RegistryCredential": { - "base": "

Information about credentials that provide access to a private Docker registry. When this is set:

For more information, see Private Registry with Secrets Manager Sample for CodeBuild.

", + "base": "

Information about credentials that provide access to a private Docker registry. When this is set:

For more information, see Private Registry with AWS Secrets Manager Sample for AWS CodeBuild.

", "refs": { "ProjectEnvironment$registryCredential": "

The credentials for access to a private registry.

", "StartBuildBatchInput$registryCredentialOverride": "

A RegistryCredential object that overrides credentials for access to a private registry.

", @@ -1199,7 +1207,7 @@ "BatchGetReportsInput$reportArns": "

An array of ARNs that identify the Report objects to return.

", "BatchGetReportsOutput$reportsNotFound": "

An array of ARNs passed to BatchGetReportGroups that are not associated with a Report.

", "ListReportsForReportGroupOutput$reports": "

The list of report ARNs.

", - "ListReportsOutput$reports": "

The list of returned ARNs for the reports in the current Amazon Web Services account.

" + "ListReportsOutput$reports": "

The list of returned ARNs for the reports in the current AWS account.

" } }, "ReportCodeCoverageSortByType": { @@ -1243,8 +1251,8 @@ "refs": { "BatchGetReportGroupsInput$reportGroupArns": "

An array of report group ARNs that identify the report groups to return.

", "BatchGetReportGroupsOutput$reportGroupsNotFound": "

An array of ARNs passed to BatchGetReportGroups that are not associated with a ReportGroup.

", - "ListReportGroupsOutput$reportGroups": "

The list of ARNs for the report groups in the current Amazon Web Services account.

", - "ListSharedReportGroupsOutput$reportGroups": "

The list of ARNs for the report groups shared with the current Amazon Web Services account or user.

" + "ListReportGroupsOutput$reportGroups": "

The list of ARNs for the report groups in the current AWS account.

", + "ListSharedReportGroupsOutput$reportGroups": "

The list of ARNs for the report groups shared with the current AWS account or user.

" } }, "ReportGroupName": { @@ -1293,7 +1301,7 @@ "ReportPackagingType": { "base": null, "refs": { - "S3ReportExportConfig$packaging": "

The type of build output artifact to create. Valid values include:

" + "S3ReportExportConfig$packaging": "

The type of build output artifact to create. Valid values include:

" } }, "ReportStatusCounts": { @@ -1343,12 +1351,12 @@ } }, "ResourceAlreadyExistsException": { - "base": "

The specified Amazon Web Services resource cannot be created, because an Amazon Web Services resource with the same settings already exists.

", + "base": "

The specified AWS resource cannot be created, because an AWS resource with the same settings already exists.

", "refs": { } }, "ResourceNotFoundException": { - "base": "

The specified Amazon Web Services resource cannot be found.

", + "base": "

The specified AWS resource cannot be found.

", "refs": { } }, @@ -1413,8 +1421,8 @@ "SharedResourceSortByType": { "base": null, "refs": { - "ListSharedProjectsInput$sortBy": "

The criterion to be used to list build projects shared with the current Amazon Web Services account or user. Valid values include:

", - "ListSharedReportGroupsInput$sortBy": "

The criterion to be used to list report groups shared with the current Amazon Web Services account or user. Valid values include:

" + "ListSharedProjectsInput$sortBy": "

The criterion to be used to list build projects shared with the current AWS account or user. Valid values include:

", + "ListSharedReportGroupsInput$sortBy": "

The criterion to be used to list report groups shared with the current AWS account or user. Valid values include:

" } }, "SortOrderType": { @@ -1434,9 +1442,9 @@ } }, "SourceAuth": { - "base": "

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

", + "base": "

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.

", "refs": { - "ProjectSource$auth": "

Information about the authorization settings for CodeBuild to access the source code to be built.

This information is for the CodeBuild console's use only. Your code should not get or set this information directly.

", + "ProjectSource$auth": "

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly.

", "StartBuildBatchInput$sourceAuthOverride": "

A SourceAuth object that overrides the one defined in the batch build project. This override applies only if the build project's source is BitBucket or GitHub.

", "StartBuildInput$sourceAuthOverride": "

An authorization type for this build that overrides the one defined in the build project. This override applies only if the build project's source is BitBucket or GitHub.

" } @@ -1462,7 +1470,7 @@ "SourceType": { "base": null, "refs": { - "ProjectSource$type": "

The type of repository that contains the source code to be built. Valid values include:

", + "ProjectSource$type": "

The type of repository that contains the source code to be built. Valid values include:

", "StartBuildBatchInput$sourceTypeOverride": "

The source input type that overrides the source input defined in the batch build project.

", "StartBuildInput$sourceTypeOverride": "

A source input type, for this build, that overrides the source input defined in the build project.

" } @@ -1522,23 +1530,23 @@ "base": null, "refs": { "Build$currentPhase": "

The current build phase.

", - "Build$initiator": "

The entity that started the build. Valid values include:

", + "Build$initiator": "

The entity that started the build. Valid values include:

", "Build$buildBatchArn": "

The ARN of the batch build that this build is a member of, if applicable.

", "BuildArtifacts$location": "

Information about the location of the build artifacts.

", "BuildArtifacts$sha256sum": "

The SHA-256 hash of the build artifact.

You can use this hash along with a checksum tool to confirm file integrity and authenticity.

This value is available only if the build project's packaging value is set to ZIP.

", "BuildArtifacts$md5sum": "

The MD5 hash of the build artifact.

You can use this hash along with a checksum tool to confirm file integrity and authenticity.

This value is available only if the build project's packaging value is set to ZIP.

", "BuildArtifacts$artifactIdentifier": "

An identifier for this artifact definition.

", "BuildBatch$currentPhase": "

The current phase of the batch build.

", - "BuildBatch$initiator": "

The entity that started the batch build. Valid values include:

", + "BuildBatch$initiator": "

The entity that started the batch build. Valid values include:

", "BuildGroup$identifier": "

Contains the identifier of the build group.

", "BuildNotDeleted$statusCode": "

Additional information about the build that could not be successfully deleted.

", "BuildReportArns$member": null, "BuildStatusConfig$context": "

Specifies the context of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.

Bitbucket

This parameter is used for the name parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.

GitHub/GitHub Enterprise Server

This parameter is used for the context parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.

", "BuildStatusConfig$targetUrl": "

Specifies the target url of the build status CodeBuild sends to the source provider. The usage of this parameter depends on the source provider.

Bitbucket

This parameter is used for the url parameter in the Bitbucket commit status. For more information, see build in the Bitbucket API documentation.

GitHub/GitHub Enterprise Server

This parameter is used for the target_url parameter in the GitHub commit status. For more information, see Create a commit status in the GitHub developer guide.

", "BuildSummary$arn": "

The batch build ARN.

", - "CloudWatchLogsConfig$groupName": "

The group name of the logs in CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

", - "CloudWatchLogsConfig$streamName": "

The prefix of the stream name of the CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

", - "CreateProjectInput$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "CloudWatchLogsConfig$groupName": "

The group name of the logs in Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

", + "CloudWatchLogsConfig$streamName": "

The prefix of the stream name of the Amazon CloudWatch Logs. For more information, see Working with Log Groups and Log Streams.

", + "CreateProjectInput$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "CreateWebhookInput$branchFilter": "

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

", "DeleteBuildBatchOutput$statusCode": "

The status code.

", "DescribeCodeCoveragesInput$nextToken": "

The nextToken value returned from a previous call to DescribeCodeCoverages. This specifies the next item to return. To return the beginning of the list, exclude this parameter.

", @@ -1548,7 +1556,7 @@ "DescribeTestCasesOutput$nextToken": "

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", "EnvironmentImage$name": "

The name of the Docker image.

", "EnvironmentImage$description": "

The description of the Docker image.

", - "EnvironmentVariable$value": "

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially Amazon Web Services secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

", + "EnvironmentVariable$value": "

The value of the environment variable.

We strongly discourage the use of PLAINTEXT environment variables to store sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT environment variables can be displayed in plain text using the AWS CodeBuild console and the AWS Command Line Interface (AWS CLI). For sensitive values, we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER.

", "ExportedEnvironmentVariable$value": "

The value assigned to the exported environment variable.

", "ImageVersions$member": null, "ListBuildBatchesForProjectInput$nextToken": "

The nextToken value returned from a previous call to ListBuildBatchesForProject. This specifies the next item to return. To return the beginning of the list, exclude this parameter.

", @@ -1570,32 +1578,32 @@ "ListSharedProjectsOutput$nextToken": "

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", "ListSharedReportGroupsInput$nextToken": "

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", "ListSharedReportGroupsOutput$nextToken": "

During a previous call, the maximum number of items that can be returned is the value specified in maxResults. If there are more items in the list, then a unique string called a nextToken is returned. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

", - "LogsLocation$groupName": "

The name of the CloudWatch Logs group for the build logs.

", - "LogsLocation$streamName": "

The name of the CloudWatch Logs stream for the build logs.

", - "LogsLocation$deepLink": "

The URL to an individual build log in CloudWatch Logs.

", + "LogsLocation$groupName": "

The name of the Amazon CloudWatch Logs group for the build logs.

", + "LogsLocation$streamName": "

The name of the Amazon CloudWatch Logs stream for the build logs.

", + "LogsLocation$deepLink": "

The URL to an individual build log in Amazon CloudWatch Logs.

", "LogsLocation$s3DeepLink": "

The URL to a build log in an S3 bucket.

", - "LogsLocation$cloudWatchLogsArn": "

The ARN of CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. For more information, see Resources Defined by CloudWatch Logs.

", + "LogsLocation$cloudWatchLogsArn": "

The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. For more information, see Resources Defined by Amazon CloudWatch Logs.

", "LogsLocation$s3LogsArn": "

The ARN of S3 logs for a build project. Its format is arn:${Partition}:s3:::${BucketName}/${ObjectName}. For more information, see Resources Defined by Amazon S3.

", "PhaseContext$statusCode": "

The status code for the context of the build phase.

", "PhaseContext$message": "

An explanation of the build phase's context. This might include a command ID and an exit code.

", "Project$arn": "

The Amazon Resource Name (ARN) of the build project.

", - "Project$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", - "ProjectArtifacts$location": "

Information about the build output artifact location:

", - "ProjectArtifacts$path": "

Along with namespaceType and name, the pattern that CodeBuild uses to name and store the output artifact:

For example, if path is set to MyArtifacts, namespaceType is set to NONE, and name is set to MyArtifact.zip, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip.

", - "ProjectArtifacts$name": "

Along with path and namespaceType, the pattern that CodeBuild uses to name and store the output artifact:

For example:

", + "Project$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", + "ProjectArtifacts$location": "

Information about the build output artifact location:

", + "ProjectArtifacts$path": "

Along with namespaceType and name, the pattern that AWS CodeBuild uses to name and store the output artifact:

For example, if path is set to MyArtifacts, namespaceType is set to NONE, and name is set to MyArtifact.zip, the output artifact is stored in the output bucket at MyArtifacts/MyArtifact.zip.

", + "ProjectArtifacts$name": "

Along with path and namespaceType, the pattern that AWS CodeBuild uses to name and store the output artifact:

For example:

", "ProjectArtifacts$artifactIdentifier": "

An identifier for this artifact definition.

", "ProjectBadge$badgeRequestUrl": "

The publicly-accessible URL through which you can access the build badge for your project.

", "ProjectCache$location": "

Information about the cache location:

", - "ProjectEnvironment$certificate": "

The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the CodeBuild User Guide.

", - "ProjectFileSystemLocation$location": "

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of file system when you view it in the Amazon EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

", + "ProjectEnvironment$certificate": "

The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. For more information, see certificate in the AWS CodeBuild User Guide.

", + "ProjectFileSystemLocation$location": "

A string that specifies the location of the file system created by Amazon EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name of file system when you view it in the AWS EFS console. The directory path is a path to a directory in the file system that CodeBuild mounts. For example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory.

The directory path in the format efs-dns-name:/directory-path is optional. If you do not specify a directory path, the location is only the DNS name and CodeBuild mounts the entire file system.

", "ProjectFileSystemLocation$mountPoint": "

The location in the container where you mount the file system.

", "ProjectFileSystemLocation$identifier": "

The name used to access a file system created by Amazon EFS. CodeBuild creates an environment variable by appending the identifier in all capital letters to CODEBUILD_. For example, if you specify my_efs for identifier, a new environment variable is create named CODEBUILD_MY_EFS.

The identifier is used to mount your file system.

", - "ProjectFileSystemLocation$mountOptions": "

The mount options for a file system created by Amazon EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. For more information, see Recommended NFS Mount Options.

", - "ProjectSource$location": "

Information about the location of the source code to be built. Valid values include:

If you specify CODEPIPELINE for the Type property, don't specify this property. For all of the other types, you must specify Location.

", - "ProjectSource$buildspec": "

The buildspec file declaration to use for the builds in this build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", + "ProjectFileSystemLocation$mountOptions": "

The mount options for a file system created by AWS EFS. The default mount options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. For more information, see Recommended NFS Mount Options.

", + "ProjectSource$location": "

Information about the location of the source code to be built. Valid values include:

", + "ProjectSource$buildspec": "

The buildspec file declaration to use for the builds in this build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", "ProjectSource$sourceIdentifier": "

An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.

", "ProjectSourceVersion$sourceIdentifier": "

An identifier for a source in the build project. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.

", - "ProjectSourceVersion$sourceVersion": "

The source version for the corresponding source identifier. If specified, must be one of:

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "ProjectSourceVersion$sourceVersion": "

The source version for the corresponding source identifier. If specified, must be one of:

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "Report$name": "

The name of the report that was run.

", "Report$executionId": "

The ARN of the build run that generated this report.

", "ReportGroupTrendStats$average": "

Contains the average of all values analyzed.

", @@ -1605,22 +1613,22 @@ "ReportWithRawData$data": "

The value of the requested data field from the report.

", "ResolvedArtifact$location": "

The location of the artifact.

", "ResolvedArtifact$identifier": "

The identifier of the artifact.

", - "RetryBuildBatchInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch request. The token is included in the RetryBuildBatch request and is valid for five minutes. If you repeat the RetryBuildBatch request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

", - "RetryBuildInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild request. The token is included in the RetryBuild request and is valid for five minutes. If you repeat the RetryBuild request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

", + "RetryBuildBatchInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuildBatch request. The token is included in the RetryBuildBatch request and is valid for five minutes. If you repeat the RetryBuildBatch request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

", + "RetryBuildInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the RetryBuild request. The token is included in the RetryBuild request and is valid for five minutes. If you repeat the RetryBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

", "S3LogsConfig$location": "

The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 bucket name is my-bucket, and your path prefix is build-log, then acceptable formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log.

", - "S3ReportExportConfig$bucketOwner": "

The Amazon Web Services account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.

", + "S3ReportExportConfig$bucketOwner": "

The AWS account identifier of the owner of the Amazon S3 bucket. This allows report data to be exported to an Amazon S3 bucket that is owned by an account other than the account running the build.

", "S3ReportExportConfig$path": "

The path to the exported report's raw data results.

", "SourceAuth$resource": "

The resource value that applies to the specified authorization type.

", - "StartBuildBatchInput$sourceVersion": "

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "StartBuildBatchInput$sourceVersion": "

The version of the batch build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "StartBuildBatchInput$sourceLocationOverride": "

A location that overrides, for this batch build, the source location defined in the batch build project.

", - "StartBuildBatchInput$buildspecOverride": "

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", + "StartBuildBatchInput$buildspecOverride": "

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", "StartBuildBatchInput$certificateOverride": "

The name of a certificate for this batch build that overrides the one specified in the batch build project.

", - "StartBuildBatchInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch request. The token is included in the StartBuildBatch request and is valid for five minutes. If you repeat the StartBuildBatch request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

", - "StartBuildInput$sourceVersion": "

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "StartBuildBatchInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuildBatch request. The token is included in the StartBuildBatch request and is valid for five minutes. If you repeat the StartBuildBatch request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

", + "StartBuildInput$sourceVersion": "

The version of the build input to be built, for this build only. If not specified, the latest version is used. If specified, the contents depends on the source provider:

AWS CodeCommit

The commit ID, branch, or Git tag to use.

GitHub

The commit ID, pull request ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a pull request ID is specified, it must use the format pr/pull-request-ID (for example pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Bitbucket

The commit ID, branch name, or tag name that corresponds to the version of the source code you want to build. If a branch name is specified, the branch's HEAD commit ID is used. If not specified, the default branch's HEAD commit ID is used.

Amazon S3

The version ID of the object that represents the build input ZIP file to use.

If sourceVersion is specified at the project level, then this sourceVersion (at the build level) takes precedence.

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "StartBuildInput$sourceLocationOverride": "

A location that overrides, for this build, the source location for the one defined in the build project.

", - "StartBuildInput$buildspecOverride": "

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", + "StartBuildInput$buildspecOverride": "

A buildspec file declaration that overrides, for this build only, the latest one already defined in the build project.

If this value is set, it can be either an inline buildspec definition, the path to an alternate buildspec file relative to the value of the built-in CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The bucket must be in the same AWS Region as the build project. Specify the buildspec file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). If this value is not provided or is set to an empty string, the source code must contain a buildspec file in its root directory. For more information, see Buildspec File Name and Storage Location.

", "StartBuildInput$certificateOverride": "

The name of a certificate for this build that overrides the one specified in the build project.

", - "StartBuildInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, CodeBuild returns a parameter mismatch error.

", + "StartBuildInput$idempotencyToken": "

A unique, case sensitive identifier you provide to ensure the idempotency of the StartBuild request. The token is included in the StartBuild request and is valid for 5 minutes. If you repeat the StartBuild request with the same token, but change a parameter, AWS CodeBuild returns a parameter mismatch error.

", "TestCase$testRawDataPath": "

The path to the raw data file that contains the test result.

", "TestCase$prefix": "

A string that is applied to a series of related test cases. CodeBuild generates the prefix. The prefix depends on the framework used to generate the tests.

", "TestCase$name": "

The name of the test case.

", @@ -1628,7 +1636,7 @@ "TestCase$message": "

A message associated with a test case. For example, an error message or stack trace.

", "TestCaseFilter$status": "

The status used to filter test cases. A TestCaseFilter can have one status. Valid values are:

", "TestCaseFilter$keyword": "

A keyword that is used to filter on the name or the prefix of the test cases. Only test cases where the keyword is a substring of the name or the prefix will be returned.

", - "UpdateProjectInput$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the CodeBuild User Guide.

", + "UpdateProjectInput$sourceVersion": "

A version of the build input to be built for this project. If not specified, the latest version is used. If specified, it must be one of:

If sourceVersion is specified at the build level, then that version takes precedence over this sourceVersion (at the project level).

For more information, see Source Version Sample with CodeBuild in the AWS CodeBuild User Guide.

", "UpdateWebhookInput$branchFilter": "

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

", "Webhook$branchFilter": "

A regular expression used to determine which repository branches are built when a webhook is triggered. If the name of a branch matches the regular expression, then it is built. If branchFilter is empty, then all branches are built.

It is recommended that you use filterGroups instead of branchFilter.

", "WebhookFilter$pattern": "

For a WebHookFilter that uses EVENT type, a comma-separated string that specifies one or more events. For example, the webhook filter PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED allows all push, pull request created, and pull request updated events to trigger a build.

For a WebHookFilter that uses any of the other filter types, a regular expression pattern. For example, a WebHookFilter that uses HEAD_REF for its type and the pattern ^refs/heads/ triggers a build when the head reference is a branch with a reference name refs/heads/branch-name.

" @@ -1641,7 +1649,7 @@ } }, "Tag": { - "base": "

A tag, consisting of a key and a value.

This tag is available for use by Amazon Web Services services that support tags in CodeBuild.

", + "base": "

A tag, consisting of a key and a value.

This tag is available for use by AWS services that support tags in AWS CodeBuild.

", "refs": { "TagList$member": null } @@ -1649,12 +1657,12 @@ "TagList": { "base": null, "refs": { - "CreateProjectInput$tags": "

A list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

", - "CreateReportGroupInput$tags": "

A list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

", - "Project$tags": "

A list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

", - "ReportGroup$tags": "

A list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

", - "UpdateProjectInput$tags": "

An updated list of tag key and value pairs associated with this build project.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

", - "UpdateReportGroupInput$tags": "

An updated list of tag key and value pairs associated with this report group.

These tags are available for use by Amazon Web Services services that support CodeBuild report group tags.

" + "CreateProjectInput$tags": "

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

", + "CreateReportGroupInput$tags": "

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

", + "Project$tags": "

A list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

", + "ReportGroup$tags": "

A list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

", + "UpdateProjectInput$tags": "

An updated list of tag key and value pairs associated with this build project.

These tags are available for use by AWS services that support AWS CodeBuild build project tags.

", + "UpdateReportGroupInput$tags": "

An updated list of tag key and value pairs associated with this report group.

These tags are available for use by AWS services that support AWS CodeBuild report group tags.

" } }, "TestCase": { @@ -1684,15 +1692,15 @@ "TimeOut": { "base": null, "refs": { - "CreateProjectInput$timeoutInMinutes": "

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

", + "CreateProjectInput$timeoutInMinutes": "

How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before it times out any build that has not been marked as completed. The default is 60 minutes.

", "CreateProjectInput$queuedTimeoutInMinutes": "

The number of minutes a build is allowed to be queued before it times out.

", - "Project$timeoutInMinutes": "

How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

", + "Project$timeoutInMinutes": "

How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed. The default is 60 minutes.

", "Project$queuedTimeoutInMinutes": "

The number of minutes a build is allowed to be queued before it times out.

", "StartBuildBatchInput$buildTimeoutInMinutesOverride": "

Overrides the build timeout specified in the batch build project.

", "StartBuildBatchInput$queuedTimeoutInMinutesOverride": "

The number of minutes a batch build is allowed to be queued before it times out.

", "StartBuildInput$timeoutInMinutesOverride": "

The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, for this build only, the latest setting already defined in the build project.

", "StartBuildInput$queuedTimeoutInMinutesOverride": "

The number of minutes a build is allowed to be queued before it times out.

", - "UpdateProjectInput$timeoutInMinutes": "

The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before timing out any related build that did not get marked as completed.

", + "UpdateProjectInput$timeoutInMinutes": "

The replacement value in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait before timing out any related build that did not get marked as completed.

", "UpdateProjectInput$queuedTimeoutInMinutes": "

The number of minutes a build is allowed to be queued before it times out.

" } }, @@ -1756,21 +1764,21 @@ } }, "VpcConfig": { - "base": "

Information about the VPC configuration that CodeBuild accesses.

", + "base": "

Information about the VPC configuration that AWS CodeBuild accesses.

", "refs": { - "Build$vpcConfig": "

If your CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

", + "Build$vpcConfig": "

If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide this parameter that identifies the VPC ID and the list of security group IDs and subnet IDs. The security groups and subnets must belong to the same VPC. You must provide at least one security group and one subnet ID.

", "BuildBatch$vpcConfig": null, - "CreateProjectInput$vpcConfig": "

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

", - "Project$vpcConfig": "

Information about the VPC configuration that CodeBuild accesses.

", - "UpdateProjectInput$vpcConfig": "

VpcConfig enables CodeBuild to access resources in an Amazon VPC.

" + "CreateProjectInput$vpcConfig": "

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.

", + "Project$vpcConfig": "

Information about the VPC configuration that AWS CodeBuild accesses.

", + "UpdateProjectInput$vpcConfig": "

VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC.

" } }, "Webhook": { - "base": "

Information about a webhook that connects repository events to a build project in CodeBuild.

", + "base": "

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

", "refs": { - "CreateWebhookOutput$webhook": "

Information about a webhook that connects repository events to a build project in CodeBuild.

", - "Project$webhook": "

Information about a webhook that connects repository events to a build project in CodeBuild.

", - "UpdateWebhookOutput$webhook": "

Information about a repository's webhook that is associated with a project in CodeBuild.

" + "CreateWebhookOutput$webhook": "

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

", + "Project$webhook": "

Information about a webhook that connects repository events to a build project in AWS CodeBuild.

", + "UpdateWebhookOutput$webhook": "

Information about a repository's webhook that is associated with a project in AWS CodeBuild.

" } }, "WebhookBuildType": { @@ -1801,13 +1809,13 @@ "BuildBatch$debugSessionEnabled": "

Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.

", "CreateProjectInput$badgeEnabled": "

Set this to true to generate a publicly accessible URL for your project's build badge.

", "DebugSession$sessionEnabled": "

Specifies if session debugging is enabled for this build.

", - "GitSubmodulesConfig$fetchSubmodules": "

Set to true to fetch Git submodules for your CodeBuild build project.

", + "GitSubmodulesConfig$fetchSubmodules": "

Set to true to fetch Git submodules for your AWS CodeBuild build project.

", "ImportSourceCredentialsInput$shouldOverwrite": "

Set to false to prevent overwriting the repository source credentials. Set to true to overwrite the repository source credentials. The default value is true.

", "ProjectArtifacts$overrideArtifactName": "

If this flag is set, a name specified in the buildspec file overrides the artifact name. The name specified in a buildspec file is calculated at build time and uses the Shell Command Language. For example, you can append a date and time to your artifact name so that it is always unique.

", "ProjectArtifacts$encryptionDisabled": "

Set to true if you do not want your output artifacts encrypted. This option is valid only if your artifacts type is Amazon S3. If this is set with another artifacts type, an invalidInputException is thrown.

", "ProjectBuildBatchConfig$combineArtifacts": "

Specifies if the build artifacts for the batch build should be combined into a single artifact location.

", "ProjectEnvironment$privilegedMode": "

Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images. Otherwise, a build that attempts to interact with the Docker daemon fails. The default setting is false.

You can initialize the Docker daemon during the install phase of your build by adding one of the following sets of commands to the install phase of your buildspec file:

If the operating system's base image is Ubuntu Linux:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&

- timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"

If the operating system's base image is Alpine Linux and the previous command does not work, add the -t argument to timeout:

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay&

- timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"

", - "ProjectSource$reportBuildStatus": "

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

", + "ProjectSource$reportBuildStatus": "

Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

", "ProjectSource$insecureSsl": "

Enable this flag to ignore SSL warnings while connecting to the project source code.

", "Report$truncated": "

A boolean that specifies if this report run is truncated. The list of test cases is truncated after the maximum number of test cases is reached.

", "S3LogsConfig$encryptionDisabled": "

Set to true if you do not want your S3 build log output encrypted. By default S3 build logs are encrypted.

", @@ -1817,7 +1825,7 @@ "StartBuildBatchInput$privilegedModeOverride": "

Enable this flag to override privileged mode in the batch build project.

", "StartBuildBatchInput$debugSessionEnabled": "

Specifies if session debugging is enabled for this batch build. For more information, see Viewing a running build in Session Manager. Batch session debugging is not supported for matrix batch builds.

", "StartBuildInput$insecureSslOverride": "

Enable this flag to override the insecure SSL setting that is specified in the build project. The insecure SSL setting determines whether to ignore SSL warnings while connecting to the project source code. This override applies only if the build's source is GitHub Enterprise.

", - "StartBuildInput$reportBuildStatusOverride": "

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

", + "StartBuildInput$reportBuildStatusOverride": "

Set to true to report to your source provider the status of a build's start and completion. If you use this option with a source provider other than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is thrown.

To be able to report the build status to the source provider, the user associated with the source provider must have write access to the repo. If the user does not have write access, the build status cannot be updated. For more information, see Source provider access in the AWS CodeBuild User Guide.

The status of a build triggered by a webhook is always reported to your source provider.

", "StartBuildInput$privilegedModeOverride": "

Enable this flag to override privileged mode in the build project.

", "StartBuildInput$debugSessionEnabled": "

Specifies if session debugging is enabled for this build. For more information, see Viewing a running build in Session Manager.

", "UpdateProjectInput$badgeEnabled": "

Set this to true to generate a publicly accessible URL for your project's build badge.

", @@ -1828,7 +1836,7 @@ "base": null, "refs": { "BatchRestrictions$maximumBuildsAllowed": "

Specifies the maximum number of builds allowed.

", - "Build$timeoutInMinutes": "

How long, in minutes, for CodeBuild to wait before timing out this build if it does not get marked as completed.

", + "Build$timeoutInMinutes": "

How long, in minutes, for AWS CodeBuild to wait before timing out this build if it does not get marked as completed.

", "Build$queuedTimeoutInMinutes": "

The number of minutes a build is allowed to be queued before it times out.

", "BuildBatch$buildTimeoutInMinutes": "

Specifies the maximum amount of time, in minutes, that the build in a batch must be completed in.

", "BuildBatch$queuedTimeoutInMinutes": "

Specifies the amount of time, in minutes, that the batch build is allowed to be queued before it times out.

", diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 975c444230..ce270e837b 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -901,7 +901,7 @@ "LoadBalancerAttributeKey": { "base": null, "refs": { - "LoadBalancerAttribute$Key": "

The name of the attribute.

The following attribute is supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attribute is supported by Network Load Balancers and Gateway Load Balancers:

" + "LoadBalancerAttribute$Key": "

The name of the attribute.

The following attribute is supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attribute is supported by Network Load Balancers and Gateway Load Balancers:

" } }, "LoadBalancerAttributeValue": { @@ -1127,7 +1127,7 @@ "CreateTargetGroupInput$HealthCheckProtocol": "

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

", "Listener$Protocol": "

The protocol for connections from clients to the load balancer.

", "ModifyListenerInput$Protocol": "

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.

", - "ModifyTargetGroupInput$HealthCheckProtocol": "

The protocol the load balancer uses when performing health checks on targets. For Application Load Balancers, the default is HTTP. For Network Load Balancers and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported for health checks if the protocol of the target group is HTTP or HTTPS. It is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

With Network Load Balancers, you can't modify this setting.

", + "ModifyTargetGroupInput$HealthCheckProtocol": "

The protocol the load balancer uses when performing health checks on targets. The TCP protocol is supported for health checks only if the protocol of the target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

With Network Load Balancers, you can't modify this setting.

", "TargetGroup$Protocol": "

The protocol to use for routing traffic to the targets.

", "TargetGroup$HealthCheckProtocol": "

The protocol to use to connect with the target. The GENEVE, TLS, UDP, and TCP_UDP protocols are not supported for health checks.

" } diff --git a/models/apis/elasticmapreduce/2009-03-31/api-2.json b/models/apis/elasticmapreduce/2009-03-31/api-2.json index c1e0aeb609..5ef0dd8698 100644 --- a/models/apis/elasticmapreduce/2009-03-31/api-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/api-2.json @@ -190,6 +190,19 @@ {"shape":"InvalidRequestException"} ] }, + "DescribeReleaseLabel":{ + "name":"DescribeReleaseLabel", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeReleaseLabelInput"}, + "output":{"shape":"DescribeReleaseLabelOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "DescribeSecurityConfiguration":{ "name":"DescribeSecurityConfiguration", "http":{ @@ -342,6 +355,19 @@ {"shape":"InvalidRequestException"} ] }, + "ListReleaseLabels":{ + "name":"ListReleaseLabels", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListReleaseLabelsInput"}, + "output":{"shape":"ListReleaseLabelsOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"InvalidRequestException"} + ] + }, "ListSecurityConfigurations":{ "name":"ListSecurityConfigurations", "http":{ @@ -1160,6 +1186,22 @@ "NotebookExecution":{"shape":"NotebookExecution"} } }, + "DescribeReleaseLabelInput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{"shape":"String"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsNumber"} + } + }, + "DescribeReleaseLabelOutput":{ + "type":"structure", + "members":{ + "ReleaseLabel":{"shape":"String"}, + "Applications":{"shape":"SimplifiedApplicationList"}, + "NextToken":{"shape":"String"} + } + }, "DescribeSecurityConfigurationInput":{ "type":"structure", "required":["Name"], @@ -2019,6 +2061,21 @@ "Marker":{"shape":"Marker"} } }, + "ListReleaseLabelsInput":{ + "type":"structure", + "members":{ + "Filters":{"shape":"ReleaseLabelFilter"}, + "NextToken":{"shape":"String"}, + "MaxResults":{"shape":"MaxResultsNumber"} + } + }, + 
"ListReleaseLabelsOutput":{ + "type":"structure", + "members":{ + "ReleaseLabels":{"shape":"StringList"}, + "NextToken":{"shape":"String"} + } + }, "ListSecurityConfigurationsInput":{ "type":"structure", "members":{ @@ -2092,6 +2149,11 @@ "SPOT" ] }, + "MaxResultsNumber":{ + "type":"integer", + "max":100, + "min":1 + }, "MetricDimension":{ "type":"structure", "members":{ @@ -2322,6 +2384,13 @@ "members":{ } }, + "ReleaseLabelFilter":{ + "type":"structure", + "members":{ + "Prefix":{"shape":"String"}, + "Application":{"shape":"String"} + } + }, "RemoveAutoScalingPolicyInput":{ "type":"structure", "required":[ @@ -2556,6 +2625,17 @@ "CoolDown":{"shape":"Integer"} } }, + "SimplifiedApplication":{ + "type":"structure", + "members":{ + "Name":{"shape":"String"}, + "Version":{"shape":"String"} + } + }, + "SimplifiedApplicationList":{ + "type":"list", + "member":{"shape":"SimplifiedApplication"} + }, "SpotProvisioningAllocationStrategy":{ "type":"string", "enum":["capacity-optimized"] diff --git a/models/apis/elasticmapreduce/2009-03-31/docs-2.json b/models/apis/elasticmapreduce/2009-03-31/docs-2.json index 50cd4bbd6f..94e59f3418 100644 --- a/models/apis/elasticmapreduce/2009-03-31/docs-2.json +++ b/models/apis/elasticmapreduce/2009-03-31/docs-2.json @@ -1,12 +1,12 @@ { "version": "2.0", - "service": "

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several AWS services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

", + "service": "

Amazon EMR is a web service that makes it easier to process large amounts of data efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do tasks such as web indexing, data mining, log file analysis, machine learning, scientific simulation, and data warehouse management.

", "operations": { "AddInstanceFleet": "

Adds an instance fleet to a running cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x.

", "AddInstanceGroups": "

Adds one or more instance groups to a running cluster.

", "AddJobFlowSteps": "

AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using SSH to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.

A step specifies the location of a JAR file stored either on the master node of the cluster or in Amazon S3. Each step is performed by the main function of the main class of the JAR file. The main class can be specified either in the manifest of the JAR or by using the MainFunction parameter of the step.

Amazon EMR executes each step in the order listed. For a step to be considered complete, the main function must exit with a zero exit code and all Hadoop jobs started while the step was running must have completed and run successfully.

You can only add steps to a cluster that is in one of the following states: STARTING, BOOTSTRAPPING, RUNNING, or WAITING.

", "AddTags": "

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

", - "CancelSteps": "

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. You can only cancel steps that are in a PENDING state.

", + "CancelSteps": "

Cancels a pending step or steps in a running cluster. Available only in Amazon EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; it does not guarantee that a step will be canceled, even if the request is successfully submitted. When you use Amazon EMR versions 5.28.0 and later, you can cancel steps that are in a PENDING or RUNNING state. In earlier versions of Amazon EMR, you can only cancel steps that are in a PENDING state.

", "CreateSecurityConfiguration": "

Creates a security configuration, which is stored in the service and can be specified when a cluster is created.

", "CreateStudio": "

Creates a new Amazon EMR Studio.

", "CreateStudioSessionMapping": "

Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group.

", @@ -16,34 +16,36 @@ "DescribeCluster": "

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on.

", "DescribeJobFlows": "

This API is no longer supported and will eventually be removed. We recommend you use ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions instead.

DescribeJobFlows returns a list of job flows that match all of the supplied parameters. The parameters can include a list of job flow IDs, job flow states, and restrictions on job flow creation date and time.

Regardless of supplied parameters, only job flows created within the last two months are returned.

If no parameters are supplied, then job flows matching either of the following criteria are returned:

Amazon EMR can return a maximum of 512 job flow descriptions.

", "DescribeNotebookExecution": "

Provides details of a notebook execution.

", + "DescribeReleaseLabel": "

Provides EMR release label details, such as releases available in the region where the API request is run, and the available applications for a specific EMR release label. Can also list EMR release versions that support a specified version of Spark.

", "DescribeSecurityConfiguration": "

Provides the details of a security configuration by returning the configuration JSON.

", "DescribeStep": "

Provides more detail about the cluster step.

", "DescribeStudio": "

Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on.

", - "GetBlockPublicAccessConfiguration": "

Returns the Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", + "GetBlockPublicAccessConfiguration": "

Returns the Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", "GetManagedScalingPolicy": "

Fetches the attached managed scaling policy for an Amazon EMR cluster.

", "GetStudioSessionMapping": "

Fetches mapping details for the specified Amazon EMR Studio and identity (user or group).

", "ListBootstrapActions": "

Provides information about the bootstrap actions associated with a cluster.

", - "ListClusters": "

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

", + "ListClusters": "

Provides the status of all clusters visible to this account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status. This call returns a maximum of 50 clusters in unsorted order per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls.

", "ListInstanceFleets": "

Lists all available details about the instance fleets in a cluster.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "ListInstanceGroups": "

Provides all available details about the instance groups in a cluster.

", "ListInstances": "

Provides information for all active EC2 instances and EC2 instances terminated in the last 30 days, up to a maximum of 2,000. EC2 instances in any of the following states are considered active: AWAITING_FULFILLMENT, PROVISIONING, BOOTSTRAPPING, RUNNING.

", "ListNotebookExecutions": "

Provides summaries of all notebook executions. You can filter the list based on multiple criteria such as status, time range, and editor id. Returns a maximum of 50 notebook executions and a marker to track the paging of a longer notebook execution list across multiple ListNotebookExecution calls.

", + "ListReleaseLabels": "

Retrieves release labels of EMR services in the region where the API is called.

", "ListSecurityConfigurations": "

Lists all the security configurations visible to this account, providing their creation dates and times, and their names. This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListSecurityConfigurations calls.

", - "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request of filter by StepStates. You can specify a maximum of 10 stepIDs.

", + "ListSteps": "

Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates. You can specify a maximum of 10 stepIDs. The CLI automatically paginates results to return a list greater than 50 steps. To return more than 50 steps using the CLI, specify a Marker, which is a pagination token that indicates the next set of steps to retrieve.

", "ListStudioSessionMappings": "

Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId.

", - "ListStudios": "

Returns a list of all Amazon EMR Studios associated with the AWS account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

", + "ListStudios": "

Returns a list of all Amazon EMR Studios associated with the account. The list includes details such as ID, Studio Access URL, and creation time for each Studio.

", "ModifyCluster": "

Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID.

", "ModifyInstanceFleet": "

Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID. The call either succeeds or fails atomically.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "ModifyInstanceGroups": "

ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group. The input parameters include the new target instance count for the group and the instance group ID. The call will either succeed or fail atomically.

", "PutAutoScalingPolicy": "

Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric.

", - "PutBlockPublicAccessConfiguration": "

Creates or updates an Amazon EMR block public access configuration for your AWS account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", + "PutBlockPublicAccessConfiguration": "

Creates or updates an Amazon EMR block public access configuration for your account in the current Region. For more information see Configure Block Public Access for Amazon EMR in the Amazon EMR Management Guide.

", "PutManagedScalingPolicy": "

Creates or updates a managed scaling policy for an Amazon EMR cluster. The managed scaling policy defines the limits for resources, such as EC2 instances that can be added or terminated from a cluster. The policy only applies to the core and task nodes. The master node cannot be scaled after initial configuration.

", "RemoveAutoScalingPolicy": "

Removes an automatic scaling policy from a specified instance group within an EMR cluster.

", "RemoveManagedScalingPolicy": "

Removes a managed scaling policy from a specified EMR cluster.

", "RemoveTags": "

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tag Clusters.

The following example removes the stack tag with value Prod from a cluster:

", "RunJobFlow": "

RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE, the cluster transitions to the WAITING state rather than shutting down after the steps have completed.

For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.

A maximum of 256 steps are allowed in each job flow.

If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide.

For long running clusters, we recommend that you periodically store your results.

The instance fleets configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. The RunJobFlow request can contain InstanceFleets parameters or InstanceGroups parameters, but not both.

", "SetTerminationProtection": "

SetTerminationProtection locks a cluster (job flow) so the EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

", - "SetVisibleToAllUsers": "

Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster is visible to all IAM users of the AWS account associated with the cluster. Only the IAM user who created the cluster or the AWS account root user can call this action. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If set to false, only the IAM user that created the cluster can perform actions. This action works on running clusters. You can override the default true setting when you create a cluster by using the VisibleToAllUsers parameter with RunJobFlow.

", + "SetVisibleToAllUsers": "

Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.

This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.

For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", "StartNotebookExecution": "

Starts a notebook execution.

", "StopNotebookExecution": "

Stops a notebook execution.

", "TerminateJobFlows": "

TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow is shut down, any step not yet completed is canceled and the EC2 instances on which the cluster is running are stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was specified when the cluster was created.

The maximum number of clusters allowed is 10. The call to TerminateJobFlows is asynchronous. Depending on the configuration of the cluster, it may take up to 1-5 minutes for the cluster to completely terminate and release allocated resources, such as Amazon EC2 instances.

", @@ -54,9 +56,9 @@ "ActionOnFailure": { "base": null, "refs": { - "Step$ActionOnFailure": "

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

", - "StepConfig$ActionOnFailure": "

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

", - "StepSummary$ActionOnFailure": "

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

" + "Step$ActionOnFailure": "

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward compatibility. We recommend using TERMINATE_CLUSTER instead.

If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. The step is not submitted and the action fails with a message that the ActionOnFailure setting is not valid.

If you change a cluster's StepConcurrencyLevel to be greater than 1 while a step is running, the ActionOnFailure parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate.

", + "StepConfig$ActionOnFailure": "

The action to take when the step fails. Use one of the following values:

If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. The step is not submitted and the action fails with a message that the ActionOnFailure setting is not valid.

If you change a cluster's StepConcurrencyLevel to be greater than 1 while a step is running, the ActionOnFailure parameter may not behave as you expect. In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, pending steps and the running step are not canceled; for a step that fails with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate.

", + "StepSummary$ActionOnFailure": "

The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward compatibility.

" } }, "AddInstanceFleetInput": { @@ -183,9 +185,9 @@ } }, "BlockPublicAccessConfigurationMetadata": { - "base": "

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

", + "base": "

Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

", "refs": { - "GetBlockPublicAccessConfigurationOutput$BlockPublicAccessConfigurationMetadata": "

Properties that describe the AWS principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" + "GetBlockPublicAccessConfigurationOutput$BlockPublicAccessConfigurationMetadata": "

Properties that describe the Amazon Web Services principal that created the BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration action as well as the date and time that the configuration was created. Each time a configuration for block public access is updated, Amazon EMR updates this metadata.

" } }, "Boolean": { @@ -194,15 +196,15 @@ "BlockPublicAccessConfiguration$BlockPublicSecurityGroupRules": "

Indicates whether Amazon EMR block public access is enabled (true) or disabled (false). By default, the value is false for accounts that have created EMR clusters before July 2019. For accounts created after this, the default is true.

", "Cluster$AutoTerminate": "

Specifies whether the cluster should terminate after completing all steps.

", "Cluster$TerminationProtected": "

Indicates whether Amazon EMR will lock the cluster to prevent the EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

", - "Cluster$VisibleToAllUsers": "

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

", - "JobFlowDetail$VisibleToAllUsers": "

Indicates whether the cluster is visible to all IAM users of the AWS account associated with the cluster. The default value, true, indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. If this value is false, only the IAM user that created the cluster can perform actions. This value can be changed on a running cluster by using the SetVisibleToAllUsers action. You can override the default value of true when you create a cluster by using the VisibleToAllUsers parameter of the RunJobFlow action.

", - "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps.

", + "Cluster$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true, IAM principals in the account can perform EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is false if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true when a cluster is created using the Management Console. IAM principals that are allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", + "JobFlowDetail$VisibleToAllUsers": "

Indicates whether the cluster is visible to IAM principals in the account associated with the cluster. When true, IAM principals in the account can perform EMR cluster actions that their IAM policies allow. When false, only the IAM principal that created the cluster and the account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is false if a value is not provided when creating a cluster using the EMR API RunJobFlow command or the CLI create-cluster command. The default value is true when a cluster is created using the Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", + "JobFlowInstancesConfig$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps. Defaults to true. For more information about configuring cluster termination, see Control Cluster Termination in the EMR Management Guide.

", "JobFlowInstancesConfig$TerminationProtected": "

Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.

", "JobFlowInstancesDetail$KeepJobFlowAliveWhenNoSteps": "

Specifies whether the cluster should remain available after completing all steps.

", "JobFlowInstancesDetail$TerminationProtected": "

Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.

", - "RunJobFlowInput$VisibleToAllUsers": "

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

", + "RunJobFlowInput$VisibleToAllUsers": "

Set this value to true so that IAM principals in the account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to false for clusters created using the EMR API or the CLI create-cluster command.

When set to false, only the IAM principal that created the cluster and the account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMR Management Guide.

", "SetTerminationProtectionInput$TerminationProtected": "

A Boolean that indicates whether to protect the cluster and prevent the Amazon EC2 instances in the cluster from shutting down due to API calls, user intervention, or job-flow error.

", - "SetVisibleToAllUsersInput$VisibleToAllUsers": "

A value of true indicates that all IAM users in the AWS account can perform cluster actions if they have the proper IAM policy permissions. This is the default. A value of false indicates that only the IAM user who created the cluster can perform actions.

" + "SetVisibleToAllUsersInput$VisibleToAllUsers": "

A value of true indicates that an IAM principal in the account can perform EMR actions on the cluster that the IAM policies attached to the principal allow. A value of false indicates that only the IAM principal that created the cluster and the Amazon Web Services root user can perform EMR actions on the cluster.

" } }, "BooleanObject": { @@ -322,7 +324,7 @@ "ClusterStateList": { "base": null, "refs": { - "ListClustersInput$ClusterStates": "

The cluster state filters to apply when listing clusters.

" + "ListClustersInput$ClusterStates": "

The cluster state filters to apply when listing clusters. Clusters that change state while this action runs may not be returned as expected in the list of clusters.

" } }, "ClusterStatus": { @@ -391,7 +393,7 @@ "refs": { "Cluster$Configurations": "

Applies only to Amazon EMR releases 4.x and later. The list of Configurations supplied to the EMR cluster.

", "Configuration$Configurations": "

A list of additional configurations to apply within a configuration object.

", - "InstanceGroup$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", + "InstanceGroup$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an Amazon EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceGroup$LastSuccessfullyAppliedConfigurations": "

A list of configurations that were successfully applied for an instance group last time.

", "InstanceGroupConfig$Configurations": "

Amazon EMR releases 4.x or later.

The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).

", "InstanceGroupModifyConfig$Configurations": "

A list of new or modified configurations to apply for an instance group.

", @@ -525,6 +527,16 @@ "refs": { } }, + "DescribeReleaseLabelInput": { + "base": null, + "refs": { + } + }, + "DescribeReleaseLabelOutput": { + "base": null, + "refs": { + } + }, "DescribeSecurityConfigurationInput": { "base": null, "refs": { @@ -590,14 +602,14 @@ "base": null, "refs": { "InstanceGroup$EbsBlockDevices": "

The EBS block devices that are mapped to this instance group.

", - "InstanceTypeSpecification$EbsBlockDevices": "

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" + "InstanceTypeSpecification$EbsBlockDevices": "

The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType.

" } }, "EbsConfiguration": { "base": "

The Amazon EBS configuration of a cluster instance.

", "refs": { "InstanceGroupConfig$EbsConfiguration": "

EBS configurations that will be attached to each EC2 instance in the instance group.

", - "InstanceTypeConfig$EbsConfiguration": "

The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to each instance as defined by InstanceType.

" + "InstanceTypeConfig$EbsConfiguration": "

The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by InstanceType.

" } }, "EbsVolume": { @@ -609,7 +621,7 @@ "EbsVolumeList": { "base": null, "refs": { - "Instance$EbsVolumes": "

The list of EBS volumes that are attached to this instance.

" + "Instance$EbsVolumes": "

The list of Amazon EBS volumes that are attached to this instance.

" } }, "Ec2InstanceAttributes": { @@ -755,7 +767,7 @@ "InstanceFleetModifyConfig": { "base": "

Configuration parameters for an instance fleet modification request.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "refs": { - "ModifyInstanceFleetInput$InstanceFleet": "

The unique identifier of the instance fleet.

" + "ModifyInstanceFleetInput$InstanceFleet": "

The configuration parameters of the instance fleet.

" } }, "InstanceFleetProvisioningSpecifications": { @@ -994,7 +1006,7 @@ } }, "InstanceTypeConfig": { - "base": "

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of five instance type configurations in a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", + "base": "

An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. When you use an allocation strategy, you can include a maximum of 30 instance type configurations for a fleet. For more information about how to use an allocation strategy, see Configure Instance Fleets. Without an allocation strategy, you may specify a maximum of five instance type configurations for a fleet.

The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.

", "refs": { "InstanceTypeConfigList$member": null } @@ -1014,7 +1026,7 @@ "InstanceTypeSpecificationList": { "base": null, "refs": { - "InstanceFleet$InstanceTypeSpecifications": "

The specification for the instance types that comprise an instance fleet. Up to five unique instance specifications may be defined for each instance fleet.

" + "InstanceFleet$InstanceTypeSpecifications": "

An array of specifications for the instance types that comprise an instance fleet.

" } }, "Integer": { @@ -1041,7 +1053,7 @@ "JobFlowInstancesConfig$InstanceCount": "

The number of EC2 instances in the cluster.

", "JobFlowInstancesDetail$InstanceCount": "

The number of Amazon EC2 instances in the cluster. If the value is 1, the same instance serves as both the master and core and task node. If the value is greater than 1, one instance is the master node and all others are core and task nodes.

", "JobFlowInstancesDetail$NormalizedInstanceHours": "

An approximation of the cost of the cluster, represented in m1.small/hours. This value is increased one time for every hour that an m1.small instance runs. Larger instances are weighted more heavily, so an Amazon EC2 instance that is roughly four times more expensive would result in the normalized instance hours being increased incrementally four times. This result is only an approximation and does not reflect the actual billing rate.

", - "ModifyClusterInput$StepConcurrencyLevel": "

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.

", + "ModifyClusterInput$StepConcurrencyLevel": "

The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps. We recommend that you do not change this parameter while steps are running or the ActionOnFailure setting may not behave as expected. For more information see Step$ActionOnFailure.

", "ModifyClusterOutput$StepConcurrencyLevel": "

The number of steps that can be executed concurrently.

", "RunJobFlowInput$EbsRootVolumeSize": "

The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.

", "RunJobFlowInput$StepConcurrencyLevel": "

Specifies the number of steps that can be executed concurrently. The default value is 1. The maximum value is 256.

", @@ -1191,6 +1203,16 @@ "refs": { } }, + "ListReleaseLabelsInput": { + "base": null, + "refs": { + } + }, + "ListReleaseLabelsOutput": { + "base": null, + "refs": { + } + }, "ListSecurityConfigurationsInput": { "base": null, "refs": { @@ -1263,8 +1285,8 @@ "ListNotebookExecutionsOutput$Marker": "

A pagination token that a subsequent ListNotebookExecutions can use to determine the next set of results to retrieve.

", "ListSecurityConfigurationsInput$Marker": "

The pagination token that indicates the set of results to retrieve.

", "ListSecurityConfigurationsOutput$Marker": "

A pagination token that indicates the next set of results to retrieve. Include the marker in the next ListSecurityConfiguration call to retrieve the next page of results, if required.

", - "ListStepsInput$Marker": "

The pagination token that indicates the next set of results to retrieve.

", - "ListStepsOutput$Marker": "

The pagination token that indicates the next set of results to retrieve.

", + "ListStepsInput$Marker": "

The maximum number of steps that a single ListSteps action returns is 50. To return a longer list of steps, use multiple ListSteps actions along with the Marker parameter, which is a pagination token that indicates the next set of results to retrieve.

", + "ListStepsOutput$Marker": "

The maximum number of steps that a single ListSteps action returns is 50. To return a longer list of steps, use multiple ListSteps actions along with the Marker parameter, which is a pagination token that indicates the next set of results to retrieve.

", "ListStudioSessionMappingsInput$Marker": "

The pagination token that indicates the set of results to retrieve.

", "ListStudioSessionMappingsOutput$Marker": "

The pagination token that indicates the next set of results to retrieve.

", "ListStudiosInput$Marker": "

The pagination token that indicates the set of results to retrieve.

", @@ -1281,6 +1303,13 @@ "ScalingAction$Market": "

Not available for instance groups. Instance groups use the market type specified for the group.

" } }, + "MaxResultsNumber": { + "base": null, + "refs": { + "DescribeReleaseLabelInput$MaxResults": "

Reserved for future use. Currently set to null.

", + "ListReleaseLabelsInput$MaxResults": "

Defines the maximum number of release labels to return in a single response. The default is 100.

" + } + }, "MetricDimension": { "base": "

A CloudWatch dimension, which is specified using a Key (known as a Name in CloudWatch), Value pair. By default, Amazon EMR uses one dimension whose Key is JobFlowID and Value is a variable representing the cluster ID, which is ${emr.clusterId}. This enables the rule to bootstrap when the cluster ID becomes available.

", "refs": { @@ -1342,7 +1371,7 @@ } }, "NotebookExecutionSummary": { - "base": "

", + "base": "

Details for a notebook execution. The details include information such as the unique ID and status of the notebook execution.

", "refs": { "NotebookExecutionSummaryList$member": null } @@ -1368,7 +1397,7 @@ "OnDemandCapacityReservationUsageStrategy": { "base": null, "refs": { - "OnDemandCapacityReservationOptions$UsageStrategy": "

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy.

" + "OnDemandCapacityReservationOptions$UsageStrategy": "

Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.

If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price).

If you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy.

" } }, "OnDemandProvisioningAllocationStrategy": { @@ -1465,6 +1494,12 @@ "refs": { } }, + "ReleaseLabelFilter": { + "base": "

The release label filters by application or version prefix.

", + "refs": { + "ListReleaseLabelsInput$Filters": "

Filters the results of the request. Prefix specifies the prefix of release labels to return. Application specifies the application (with/without version) of release labels to return.

" + } + }, "RemoveAutoScalingPolicyInput": { "base": null, "refs": { @@ -1625,6 +1660,18 @@ "ScalingAction$SimpleScalingPolicyConfiguration": "

The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.

" } }, + "SimplifiedApplication": { + "base": "

The returned release label application names or versions.

", + "refs": { + "SimplifiedApplicationList$member": null + } + }, + "SimplifiedApplicationList": { + "base": null, + "refs": { + "DescribeReleaseLabelOutput$Applications": "

The list of applications available for the target release label. Name is the name of the application. Version is the concise version of the application.

" + } + }, "SpotProvisioningAllocationStrategy": { "base": null, "refs": { @@ -1672,7 +1719,7 @@ } }, "StepConfig": { - "base": "

Specification of a cluster (job flow) step.

", + "base": "

Specification for a cluster (job flow) step.

", "refs": { "StepConfigList$member": null, "StepDetail$StepConfig": "

The step configuration.

" @@ -1791,11 +1838,11 @@ "CloudWatchAlarmDefinition$Namespace": "

The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce.

", "Cluster$Name": "

The name of the cluster.

", "Cluster$LogUri": "

The path to the Amazon S3 location where logs for this cluster are stored.

", - "Cluster$LogEncryptionKmsKeyId": "

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", + "Cluster$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", "Cluster$RequestedAmiVersion": "

The AMI version requested for this cluster.

", "Cluster$RunningAmiVersion": "

The AMI version running on this cluster.

", "Cluster$ReleaseLabel": "

The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster. Release labels are in the form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. For more information about Amazon EMR release versions and included application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to Amazon EMR releases version 4.0 and later. Earlier versions use AmiVersion.

", - "Cluster$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

", + "Cluster$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

", "Cluster$MasterPublicDnsName": "

The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.

", "ClusterStateChangeReason$Message": "

The descriptive message for the state change reason.

", "ClusterSummary$Name": "

The name of the cluster.

", @@ -1803,6 +1850,10 @@ "Command$ScriptPath": "

The Amazon S3 location of the command script.

", "Configuration$Classification": "

The classification within a configuration.

", "CreateSecurityConfigurationInput$SecurityConfiguration": "

The security configuration details in JSON format. For JSON parameters and examples, see Use Security Configurations to Set Up Cluster Security in the Amazon EMR Management Guide.

", + "DescribeReleaseLabelInput$ReleaseLabel": "

The target release label to be described.

", + "DescribeReleaseLabelInput$NextToken": "

The pagination token. Reserved for future use. Currently set to null.

", + "DescribeReleaseLabelOutput$ReleaseLabel": "

The target release label described in the response.

", + "DescribeReleaseLabelOutput$NextToken": "

The pagination token. Reserved for future use. Currently set to null.

", "DescribeSecurityConfigurationOutput$SecurityConfiguration": "

The security configuration details in JSON format.

", "EbsBlockDevice$Device": "

The device name that is exposed to the instance, such as /dev/sdh.

", "EbsVolume$Device": "

The device name that is exposed to the instance, such as /dev/sdh.

", @@ -1829,11 +1880,17 @@ "InstanceGroup$BidPrice": "

If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD.

", "InstanceGroupStateChangeReason$Message": "

The status change reason description.

", "InstanceStateChangeReason$Message": "

The status change reason description.

", + "ListReleaseLabelsInput$NextToken": "

Specifies the next page of results. If NextToken is not specified, which is usually the case for the first request of ListReleaseLabels, the first page of results are determined by other filtering parameters or by the latest version. The ListReleaseLabels request fails if the identity (AWS AccountID) and all filtering parameters are different from the original request, or if the NextToken is expired or tampered with.

", + "ListReleaseLabelsOutput$NextToken": "

Used to paginate the next page of results if specified in the next ListReleaseLabels request.

", "MetricDimension$Key": "

The dimension name.

", "MetricDimension$Value": "

The dimension value.

", "ModifyClusterInput$ClusterId": "

The unique identifier of the cluster.

", + "ReleaseLabelFilter$Prefix": "

Optional release label version prefix filter. For example, emr-5.

", + "ReleaseLabelFilter$Application": "

Optional release label application filter. For example, spark@2.1.0.

", "ScalingRule$Name": "

The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.

", "ScalingRule$Description": "

A friendly, more verbose description of the automatic scaling rule.

", + "SimplifiedApplication$Name": "

The returned release label application name. For example, hadoop.

", + "SimplifiedApplication$Version": "

The returned release label application version. For example, 3.2.1.

", "Step$Name": "

The name of the cluster step.

", "StepStateChangeReason$Message": "

The descriptive message for the state change reason.

", "StepSummary$Name": "

The name of the cluster step.

", @@ -1854,6 +1911,7 @@ "Ec2InstanceAttributes$AdditionalMasterSecurityGroups": "

A list of additional Amazon EC2 security group IDs for the master node.

", "Ec2InstanceAttributes$AdditionalSlaveSecurityGroups": "

A list of additional Amazon EC2 security group IDs for the core and task nodes.

", "HadoopStepConfig$Args": "

The list of command line arguments to pass to the JAR file's main function for execution.

", + "ListReleaseLabelsOutput$ReleaseLabels": "

The returned release labels.

", "RemoveTagsInput$TagKeys": "

A list of tag keys to remove from a resource.

" } }, @@ -1974,7 +2032,7 @@ "Cluster$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.

", "CreateSecurityConfigurationInput$Name": "

The name of the security configuration.

", "CreateSecurityConfigurationOutput$Name": "

The name of the security configuration.

", - "CreateStudioInput$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other AWS services.

", + "CreateStudioInput$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR Studio. The service role provides a way for Amazon EMR Studio to interoperate with other Amazon Web Services services.

", "CreateStudioInput$UserRole": "

The IAM user role that will be assumed by users and groups logged in to an Amazon EMR Studio. The permissions attached to this IAM role can be scoped down for each user or group using session policies.

", "CreateStudioInput$DefaultS3Location": "

The Amazon S3 location to back up Amazon EMR Studio Workspaces and notebook files.

", "CreateStudioOutput$Url": "

The unique Studio access URL.

", @@ -1985,9 +2043,9 @@ "HadoopJarStepConfig$MainClass": "

The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.

", "InstanceGroupDetail$LastStateChangeReason": "

Details regarding the state of the instance group.

", "JobFlowDetail$LogUri": "

The location in Amazon S3 where log files for the job are stored.

", - "JobFlowDetail$LogEncryptionKmsKeyId": "

The AWS KMS customer master key (CMK) used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", + "JobFlowDetail$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. This attribute is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0.

", "JobFlowDetail$JobFlowRole": "

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

", - "JobFlowDetail$ServiceRole": "

The IAM role that is assumed by the Amazon EMR service to access AWS resources on your behalf.

", + "JobFlowDetail$ServiceRole": "

The IAM role that is assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

", "JobFlowDetail$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides a way for the automatic scaling feature to get the required permissions it needs to launch and terminate EC2 instances in an instance group.

", "JobFlowExecutionStatusDetail$LastStateChangeReason": "

Description of the job flow last changed state.

", "JobFlowInstancesDetail$MasterPublicDnsName": "

The DNS name of the master node. If the cluster is on a private subnet, this is the private DNS name. On a public subnet, this is the public DNS name.

", @@ -1999,13 +2057,13 @@ "NotebookExecution$LastStateChangeReason": "

The reason for the latest status change of the notebook execution.

", "PlacementType$AvailabilityZone": "

The Amazon EC2 Availability Zone for the cluster. AvailabilityZone is used for uniform instance groups, while AvailabilityZones (plural) is used for instance fleets.

", "RunJobFlowInput$LogUri": "

The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.

", - "RunJobFlowInput$LogEncryptionKmsKeyId": "

The AWS KMS customer master key (CMK) used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

", + "RunJobFlowInput$LogEncryptionKmsKeyId": "

The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256. This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.

", "RunJobFlowInput$AdditionalInfo": "

A JSON string for selecting additional features.

", "RunJobFlowInput$JobFlowRole": "

Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole. In order to use the default role, you must have already created it using the CLI or console.

", - "RunJobFlowInput$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.

", + "RunJobFlowInput$ServiceRole": "

The IAM role that will be assumed by the Amazon EMR service to access Amazon Web Services resources on your behalf.

", "RunJobFlowInput$SecurityConfiguration": "

The name of a security configuration to apply to the cluster.

", "RunJobFlowInput$AutoScalingRole": "

An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.

", - "ScriptBootstrapActionConfig$Path": "

Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.

", + "ScriptBootstrapActionConfig$Path": "

Location in Amazon S3 of the script to run during a bootstrap action.

", "SecurityConfigurationSummary$Name": "

The name of the security configuration.

", "StartNotebookExecutionInput$RelativePath": "

The path and file name of the notebook file for this execution, relative to the path specified for the EMR Notebook. For example, if you specify a path of s3://MyBucket/MyNotebooks when you create an EMR Notebook for a notebook with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of this request), and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb.

", "StartNotebookExecutionInput$NotebookParams": "

Input parameters in JSON format passed to the EMR Notebook at runtime for execution.

", @@ -2050,20 +2108,20 @@ "CreateStudioInput$EngineSecurityGroupId": "

The ID of the Amazon EMR Studio Engine security group. The Engine security group allows inbound network traffic from the Workspace security group, and it must be in the same VPC specified by VpcId.

", "CreateStudioOutput$StudioId": "

The ID of the Amazon EMR Studio.

", "CreateStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio to which the user or group will be mapped.

", - "CreateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "CreateStudioSessionMappingInput$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "CreateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. Session policies refine Studio user permissions without the need to use multiple IAM user roles.

", + "CreateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "CreateStudioSessionMappingInput$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "CreateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) for the session policy that will be applied to the user or group. You should specify the ARN for the session policy that you want to apply, not the ARN of your user role. For more information, see Create an EMR Studio User Role with Session Policies.

", "DeleteStudioInput$StudioId": "

The ID of the Amazon EMR Studio.

", "DeleteStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", - "DeleteStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "DeleteStudioSessionMappingInput$IdentityName": "

The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "DeleteStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group to remove from the Amazon EMR Studio. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "DeleteStudioSessionMappingInput$IdentityName": "

The name of the user name or group to remove from the Amazon EMR Studio. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "DescribeNotebookExecutionInput$NotebookExecutionId": "

The unique identifier of the notebook execution.

", "DescribeStudioInput$StudioId": "

The Amazon EMR Studio ID.

", "ExecutionEngineConfig$Id": "

The unique identifier of the execution engine. For an EMR cluster, this is the cluster ID.

", "ExecutionEngineConfig$MasterInstanceSecurityGroupId": "

An optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. For more information see Specifying EC2 Security Groups for EMR Notebooks in the EMR Management Guide.

", "GetStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", - "GetStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "GetStudioSessionMappingInput$IdentityName": "

The name of the user or group to fetch. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "GetStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "GetStudioSessionMappingInput$IdentityName": "

The name of the user or group to fetch. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "InstanceFleet$Name": "

A friendly name for the instance fleet.

", "InstanceFleetConfig$Name": "

The friendly name of the instance fleet.

", "InstanceGroupConfig$Name": "

Friendly name given to the instance group.

", @@ -2081,8 +2139,8 @@ "JobFlowInstancesConfig$Ec2KeyName": "

The name of the EC2 key pair that can be used to connect to the master node using SSH as the user called \"hadoop.\"

", "JobFlowInstancesConfig$HadoopVersion": "

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

", "JobFlowInstancesConfig$Ec2SubnetId": "

Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value and your account supports EC2-Classic, the cluster launches in EC2-Classic.

", - "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "

The identifier of the Amazon EC2 security group for the master node.

", - "JobFlowInstancesConfig$EmrManagedSlaveSecurityGroup": "

The identifier of the Amazon EC2 security group for the core and task nodes.

", + "JobFlowInstancesConfig$EmrManagedMasterSecurityGroup": "

The identifier of the Amazon EC2 security group for the master node. If you specify EmrManagedMasterSecurityGroup, you must also specify EmrManagedSlaveSecurityGroup.

", + "JobFlowInstancesConfig$EmrManagedSlaveSecurityGroup": "

The identifier of the Amazon EC2 security group for the core and task nodes. If you specify EmrManagedSlaveSecurityGroup, you must also specify EmrManagedMasterSecurityGroup.

", "JobFlowInstancesConfig$ServiceAccessSecurityGroup": "

The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.

", "JobFlowInstancesDetail$Ec2KeyName": "

The name of an Amazon EC2 key pair that can be used to connect to the master node using SSH.

", "JobFlowInstancesDetail$Ec2SubnetId": "

For clusters launched within Amazon Virtual Private Cloud, this is the identifier of the subnet where the cluster was launched.

", @@ -2111,11 +2169,11 @@ "SecurityGroupsList$member": null, "SessionMappingDetail$StudioId": "

The ID of the Amazon EMR Studio.

", "SessionMappingDetail$IdentityId": "

The globally unique identifier (GUID) of the user or group.

", - "SessionMappingDetail$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", + "SessionMappingDetail$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.

", "SessionMappingDetail$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

", "SessionMappingSummary$StudioId": "

The ID of the Amazon EMR Studio.

", - "SessionMappingSummary$IdentityId": "

The globally unique identifier (GUID) of the user or group from the AWS SSO Identity Store.

", - "SessionMappingSummary$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference.

", + "SessionMappingSummary$IdentityId": "

The globally unique identifier (GUID) of the user or group from the Amazon Web Services SSO Identity Store.

", + "SessionMappingSummary$IdentityName": "

The name of the user or group. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference.

", "SessionMappingSummary$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy associated with the user or group.

", "StartNotebookExecutionInput$EditorId": "

The unique identifier of the EMR Notebook to use for notebook execution.

", "StartNotebookExecutionInput$NotebookExecutionName": "

An optional name for the notebook execution.

", @@ -2142,8 +2200,8 @@ "UpdateStudioInput$Name": "

A descriptive name for the Amazon EMR Studio.

", "UpdateStudioInput$Description": "

A detailed description to assign to the Amazon EMR Studio.

", "UpdateStudioSessionMappingInput$StudioId": "

The ID of the Amazon EMR Studio.

", - "UpdateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", - "UpdateStudioSessionMappingInput$IdentityName": "

The name of the user or group to update. For more information, see UserName and DisplayName in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "UpdateStudioSessionMappingInput$IdentityId": "

The globally unique identifier (GUID) of the user or group. For more information, see UserId and GroupId in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", + "UpdateStudioSessionMappingInput$IdentityName": "

The name of the user or group to update. For more information, see UserName and DisplayName in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName or IdentityId must be specified.

", "UpdateStudioSessionMappingInput$SessionPolicyArn": "

The Amazon Resource Name (ARN) of the session policy to associate with the specified user or group.

", "XmlStringMaxLen256List$member": null } diff --git a/models/apis/elasticmapreduce/2009-03-31/paginators-1.json b/models/apis/elasticmapreduce/2009-03-31/paginators-1.json index d5b5407b34..5ea61f9252 100644 --- a/models/apis/elasticmapreduce/2009-03-31/paginators-1.json +++ b/models/apis/elasticmapreduce/2009-03-31/paginators-1.json @@ -33,6 +33,11 @@ "output_token": "Marker", "result_key": "NotebookExecutions" }, + "ListReleaseLabels": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken" + }, "ListSecurityConfigurations": { "input_token": "Marker", "output_token": "Marker", diff --git a/models/apis/iam/2010-05-08/docs-2.json b/models/apis/iam/2010-05-08/docs-2.json index 8aa5232c8f..9319cfd24c 100644 --- a/models/apis/iam/2010-05-08/docs-2.json +++ b/models/apis/iam/2010-05-08/docs-2.json @@ -14,7 +14,7 @@ "CreateGroup": "

Creates a new group.

For information about the number of groups you can create, see IAM and STS quotas in the IAM User Guide.

", "CreateInstanceProfile": "

Creates a new instance profile. For information about instance profiles, see Using roles for applications on Amazon EC2 in the IAM User Guide, and Instance profiles in the Amazon EC2 User Guide.

For information about the number of instance profiles you can create, see IAM object quotas in the IAM User Guide.

", "CreateLoginProfile": "

Creates a password for the specified IAM user. A password allows an IAM user to access Amazon Web Services services through the Management Console.

You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to create a password for any IAM user. Use ChangePassword to update your own existing password in the My Security Credentials page in the Management Console.

For more information about managing passwords, see Managing passwords in the IAM User Guide.

", - "CreateOpenIDConnectProvider": "

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

", + "CreateOpenIDConnectProvider": "

Creates an IAM entity to describe an identity provider (IdP) that supports OpenID Connect (OIDC).

The OIDC provider that you create with this operation can be used as a principal in a role's trust policy. Such a policy establishes a trust relationship between Amazon Web Services and the OIDC provider.

If you are using an OIDC identity provider from Google, Facebook, or Amazon Cognito, you don't need to create a separate IAM identity provider. These OIDC identity providers are already built-in to Amazon Web Services and are available for your use. Instead, you can move directly to creating new roles using your identity provider. To learn more, see Creating a role for web identity or OpenID connect federation in the IAM User Guide.

When you create the IAM OIDC provider, you specify the following:

You get all of this information from the OIDC IdP that you want to use to access Amazon Web Services.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

The trust for the OIDC provider is derived from the IAM provider that this operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider operation to highly privileged users.

", "CreatePolicy": "

Creates a new managed policy for your account.

This operation creates a policy version with a version identifier of v1 and sets v1 as the policy's default version. For more information about policy versions, see Versioning for managed policies in the IAM User Guide.

As a best practice, you can validate your IAM policies. To learn more, see Validating IAM policies in the IAM User Guide.

For more information about managed policies in general, see Managed policies and inline policies in the IAM User Guide.

", "CreatePolicyVersion": "

Creates a new version of the specified managed policy. To update a managed policy, you create a new policy version. A managed policy can have up to five versions. If the policy has five versions, you must delete an existing version using DeletePolicyVersion before you create a new version.

Optionally, you can set the new version as the policy's default version. The default version is the version that is in effect for the IAM users, groups, and roles to which the policy is attached.

For more information about managed policy versions, see Versioning for managed policies in the IAM User Guide.

", "CreateRole": "

Creates a new role for your account. For more information about roles, see IAM roles. For information about quotas for role names and the number of roles you can create, see IAM and STS quotas in the IAM User Guide.

", @@ -148,7 +148,7 @@ "UpdateAssumeRolePolicy": "

Updates the policy that grants an IAM entity permission to assume a role. This is typically referred to as the \"role trust policy\". For more information about roles, see Using roles to delegate permissions and federate identities.

", "UpdateGroup": "

Updates the name and/or the path of the specified IAM group.

You should understand the implications of changing a group's path or name. For more information, see Renaming users and groups in the IAM User Guide.

The person making the request (the principal), must have permission to change the role group with the old name and the new name. For example, to change the group named Managers to MGRs, the principal must have a policy that allows them to update both groups. If the principal has permission to update the Managers group, but not the MGRs group, then the update fails. For more information about permissions, see Access management.

", "UpdateLoginProfile": "

Changes the password for the specified IAM user. You can use the CLI, the Amazon Web Services API, or the Users page in the IAM console to change the password for any IAM user. Use ChangePassword to change your own password in the My Security Credentials page in the Management Console.

For more information about modifying passwords, see Managing passwords in the IAM User Guide.

", - "UpdateOpenIDConnectProviderThumbprint": "

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider's certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Trust for the OIDC provider is derived from the provider's certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

", + "UpdateOpenIDConnectProviderThumbprint": "

Replaces the existing list of server certificate thumbprints associated with an OpenID Connect (OIDC) provider resource object with a new list of thumbprints.

The list that you pass with this operation completely replaces the existing list of thumbprints. (The lists are not merged.)

Typically, you need to update a thumbprint only when the identity provider certificate changes, which occurs rarely. However, if the provider's certificate does change, any attempt to assume an IAM role that specifies the OIDC provider as a principal fails until the certificate thumbprint is updated.

Amazon Web Services secures communication with some OIDC identity providers (IdPs) through our library of trusted certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP server certificate. These OIDC IdPs include Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint remains in your configuration, but is no longer used for validation.

Trust for the OIDC provider is derived from the provider certificate and is validated by the thumbprint. Therefore, it is best to limit access to the UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

", "UpdateRole": "

Updates the description or maximum session duration setting of a role.

", "UpdateRoleDescription": "

Use UpdateRole instead.

Modifies only the description of a role. This operation performs the same function as the Description parameter in the UpdateRole operation.

", "UpdateSAMLProvider": "

Updates the metadata document for an existing SAML provider resource object.

This operation requires Signature Version 4.

", diff --git a/models/apis/kendra/2019-02-03/api-2.json b/models/apis/kendra/2019-02-03/api-2.json index 025aed25ee..6d1cffbe1d 100644 --- a/models/apis/kendra/2019-02-03/api-2.json +++ b/models/apis/kendra/2019-02-03/api-2.json @@ -1353,7 +1353,8 @@ "ServiceNowConfiguration":{"shape":"ServiceNowConfiguration"}, "ConfluenceConfiguration":{"shape":"ConfluenceConfiguration"}, "GoogleDriveConfiguration":{"shape":"GoogleDriveConfiguration"}, - "WebCrawlerConfiguration":{"shape":"WebCrawlerConfiguration"} + "WebCrawlerConfiguration":{"shape":"WebCrawlerConfiguration"}, + "WorkDocsConfiguration":{"shape":"WorkDocsConfiguration"} } }, "DataSourceDateFieldFormat":{ @@ -1516,7 +1517,8 @@ "CUSTOM", "CONFLUENCE", "GOOGLEDRIVE", - "WEBCRAWLER" + "WEBCRAWLER", + "WORKDOCS" ] }, "DataSourceVpcConfiguration":{ @@ -2669,6 +2671,12 @@ "DESCENDING" ] }, + "OrganizationId":{ + "type":"string", + "max":12, + "min":12, + "pattern":"d-[0-9a-fA-F]{10}" + }, "Port":{ "type":"integer", "max":65535, @@ -3804,6 +3812,18 @@ "SUBDOMAINS", "EVERYTHING" ] + }, + "WorkDocsConfiguration":{ + "type":"structure", + "required":["OrganizationId"], + "members":{ + "OrganizationId":{"shape":"OrganizationId"}, + "CrawlComments":{"shape":"Boolean"}, + "UseChangeLog":{"shape":"Boolean"}, + "InclusionPatterns":{"shape":"DataSourceInclusionsExclusionsStrings"}, + "ExclusionPatterns":{"shape":"DataSourceInclusionsExclusionsStrings"}, + "FieldMappings":{"shape":"DataSourceToIndexFieldMappingList"} + } } } } diff --git a/models/apis/kendra/2019-02-03/docs-2.json b/models/apis/kendra/2019-02-03/docs-2.json index a716ed6dfe..e1db3fc9e8 100644 --- a/models/apis/kendra/2019-02-03/docs-2.json +++ b/models/apis/kendra/2019-02-03/docs-2.json @@ -37,7 +37,7 @@ "Query": "

Searches an active index. Use this API to search your documents using query. The Query operation enables to do faceted search and to filter results based on document attributes.

It also enables you to provide user context that Amazon Kendra uses to enforce document access control in the search results.

Amazon Kendra searches your index for text content and question and answer (FAQ) content. By default the response contains three types of results.

You can specify that the query return only one type of result using the QueryResultTypeConfig parameter.

Each query returns the 100 most relevant results.

", "StartDataSourceSyncJob": "

Starts a synchronization job for a data source. If a synchronization job is already in progress, Amazon Kendra returns a ResourceInUseException exception.

", "StopDataSourceSyncJob": "

Stops a running synchronization job. You can't stop a scheduled synchronization job.

", - "SubmitFeedback": "

Enables you to provide feedback to Amazon Kendra to improve the performance of the service.

", + "SubmitFeedback": "

Enables you to provide feedback to Amazon Kendra to improve the performance of your index.

", "TagResource": "

Adds the specified tag to the specified index, FAQ, or data source resource. If the tag already exists, the existing value is replaced with the new value.

", "UntagResource": "

Removes a tag from an index, FAQ, or a data source.

", "UpdateDataSource": "

Updates an existing Amazon Kendra data source.

", @@ -97,7 +97,7 @@ } }, "AttributeFilter": { - "base": "

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

", + "base": "

Provides filtering the query results based on document attributes.

When you use the AndAllFilters or OrAllFilters, filters you can use 2 layers under the first attribute filter. For example, you can use:

<AndAllFilters>

  1. <OrAllFilters>

  2. <EqualTo>

If you use more than 2 layers, you receive a ValidationException exception with the message \"AttributeFilter cannot have a depth of more than 2.\"

If you use more than 10 attribute filters, you receive a ValidationException exception with the message \"AttributeFilter cannot have a length of more than 10\".

", "refs": { "AttributeFilter$NotFilter": "

Performs a logical NOT operation on all supplied filters.

", "AttributeFilterList$member": null, @@ -218,7 +218,9 @@ "ServiceNowServiceCatalogConfiguration$CrawlAttachments": "

Indicates whether Amazon Kendra should crawl attachments to the service catalog items.

", "SharePointConfiguration$CrawlAttachments": "

TRUE to include attachments to documents stored in your Microsoft SharePoint site in the index; otherwise, FALSE.

", "SharePointConfiguration$UseChangeLog": "

Set to TRUE to use the Microsoft SharePoint change log to determine the documents that need to be updated in the index. Depending on the size of the SharePoint change log, it may take longer for Amazon Kendra to use the change log than it takes it to determine the changed documents using the Amazon Kendra document crawler.

", - "SharePointConfiguration$DisableLocalGroups": "

A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

" + "SharePointConfiguration$DisableLocalGroups": "

A Boolean value that specifies whether local groups are disabled (True) or enabled (False).

", + "WorkDocsConfiguration$CrawlComments": "

TRUE to include comments on documents in your index. Including comments in your index means each comment is a document that can be searched on.

The default is set to FALSE.

", + "WorkDocsConfiguration$UseChangeLog": "

TRUE to use the change logs to update documents in your index instead of scanning all documents.

If you are syncing your Amazon WorkDocs data source with your index for the first time, all documents are scanned. After your first sync, you can use the change logs to update your documents in your index for future syncs.

The default is set to FALSE.

" } }, "CapacityUnitsConfiguration": { @@ -571,7 +573,9 @@ "SharePointConfiguration$InclusionPatterns": "

A list of regular expression patterns. Documents that match the patterns are included in the index. Documents that don't match the patterns are excluded from the index. If a document matches both an inclusion pattern and an exclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

", "SharePointConfiguration$ExclusionPatterns": "

A list of regular expression patterns. Documents that match the patterns are excluded from the index. Documents that don't match the patterns are included in the index. If a document matches both an exclusion pattern and an inclusion pattern, the document is not included in the index.

The regex is applied to the display URL of the SharePoint document.

", "WebCrawlerConfiguration$UrlInclusionPatterns": "

The regular expression pattern to include certain URLs to crawl.

If there is a regular expression pattern to exclude certain URLs that conflicts with the include pattern, the exclude pattern takes precedence.

", - "WebCrawlerConfiguration$UrlExclusionPatterns": "

The regular expression pattern to exclude certain URLs to crawl.

If there is a regular expression pattern to include certain URLs that conflicts with the exclude pattern, the exclude pattern takes precedence.

" + "WebCrawlerConfiguration$UrlExclusionPatterns": "

The regular expression pattern to exclude certain URLs to crawl.

If there is a regular expression pattern to include certain URLs that conflicts with the exclude pattern, the exclude pattern takes precedence.

", + "WorkDocsConfiguration$InclusionPatterns": "

A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.

", + "WorkDocsConfiguration$ExclusionPatterns": "

A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don’t match the patterns are included in the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn’t included in the index.

" } }, "DataSourceInclusionsExclusionsStringsMember": { @@ -664,7 +668,8 @@ "SalesforceStandardObjectConfiguration$FieldMappings": "

One or more objects that map fields in the standard object to Amazon Kendra index fields. The index field must exist before you can map a Salesforce field to it.

", "ServiceNowKnowledgeArticleConfiguration$FieldMappings": "

Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

", "ServiceNowServiceCatalogConfiguration$FieldMappings": "

Mapping between ServiceNow fields and Amazon Kendra index fields. You must create the index field before you map the field.

", - "SharePointConfiguration$FieldMappings": "

A list of DataSourceToIndexFieldMapping objects that map Microsoft SharePoint attributes to custom fields in the Amazon Kendra index. You must first create the index fields using the UpdateIndex operation before you map SharePoint attributes. For more information, see Mapping Data Source Fields.

" + "SharePointConfiguration$FieldMappings": "

A list of DataSourceToIndexFieldMapping objects that map Microsoft SharePoint attributes to custom fields in the Amazon Kendra index. You must first create the index fields using the UpdateIndex operation before you map SharePoint attributes. For more information, see Mapping Data Source Fields.

", + "WorkDocsConfiguration$FieldMappings": "

A list of DataSourceToIndexFieldMapping objects that map Amazon WorkDocs field names to custom index field names in Amazon Kendra. You must first create the custom index fields using the UpdateIndex operation before you map to Amazon WorkDocs fields. For more information, see Mapping Data Source Fields. The Amazon WorkDocs data source field names need to exist in your Amazon WorkDocs custom metadata.

" } }, "DataSourceType": { @@ -1272,7 +1277,7 @@ "IndexEdition": { "base": null, "refs": { - "CreateIndexRequest$Edition": "

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

", + "CreateIndexRequest$Edition": "

The Amazon Kendra edition to use for the index. Choose DEVELOPER_EDITION for indexes intended for development, testing, or proof of concept. Use ENTERPRISE_EDITION for your production databases. Once you set the edition for an index, it can't be changed.

The Edition parameter is optional. If you don't supply a value, the default is ENTERPRISE_EDITION.

For more information on quota limits for enterprise and developer editions, see Quotas.

", "DescribeIndexResponse$Edition": "

The Amazon Kendra edition used for the index. You decide the edition when you create the index.

", "IndexConfigurationSummary$Edition": "

Indicates whether the index is a enterprise edition index or a developer edition index.

" } @@ -1705,6 +1710,12 @@ "Relevance$RankOrder": "

Determines how values should be interpreted.

When the RankOrder field is ASCENDING, higher numbers are better. For example, a document with a rating score of 10 is higher ranking than a document with a rating score of 1.

When the RankOrder field is DESCENDING, lower numbers are better. For example, in a task tracking application, a priority 1 task is more important than a priority 5 task.

Only applies to LONG and DOUBLE fields.

" } }, + "OrganizationId": { + "base": null, + "refs": { + "WorkDocsConfiguration$OrganizationId": "

The identifier of the directory corresponding to your Amazon WorkDocs site repository.

You can find the organization ID in the AWS Directory Service by going to Active Directory, then Directories. Your Amazon WorkDocs site directory has an ID, which is the organization ID. You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable an Amazon WorkDocs site for the directory in the Amazon WorkDocs console.

" + } + }, "Port": { "base": null, "refs": { @@ -1770,7 +1781,7 @@ "QueryCapacityUnit": { "base": null, "refs": { - "CapacityUnitsConfiguration$QueryCapacityUnits": "

The amount of extra query capacity for an index and GetQuerySuggestions capacity.

A single extra capacity unit for an index provides 0.5 queries per second or approximately 40,000 queries per day.

GetQuerySuggestions capacity is 5 times the provisioned query capacity for an index. For example, the base capacity for an index is 0.5 queries per second, so GetQuerySuggestions capacity is 2.5 calls per second. If adding another 0.5 queries per second to total 1 queries per second for an index, the GetQuerySuggestions capacity is 5 calls per second.

" + "CapacityUnitsConfiguration$QueryCapacityUnits": "

The amount of extra query capacity for an index and GetQuerySuggestions capacity.

A single extra capacity unit for an index provides 0.1 queries per second or approximately 8,000 queries per day.

GetQuerySuggestions capacity is five times the provisioned query capacity for an index, or the base capacity of 2.5 calls per second, whichever is higher. For example, the base capacity for an index is 0.1 queries per second, and GetQuerySuggestions capacity has a base of 2.5 calls per second. If you add another 0.1 queries per second to total 0.2 queries per second for an index, the GetQuerySuggestions capacity is 2.5 calls per second (higher than five times 0.2 queries per second).

" } }, "QueryId": { @@ -2113,13 +2124,13 @@ "refs": { "BasicAuthenticationConfiguration$Credentials": "

Your secret ARN, which you can create in AWS Secrets Manager

You use a secret if basic authentication credentials are required to connect to a website. The secret stores your credentials of user name and password.

", "ConfluenceConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Confluence server. The secret must contain a JSON structure with the following keys:

", - "ConnectionConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.

", + "ConnectionConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. For more information, see Using a Database Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

", "GoogleDriveConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of a Secrets Managersecret that contains the credentials required to connect to Google Drive. For more information, see Using a Google Workspace Drive data source.

", "OneDriveConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the user name and password to connect to OneDrive. The user namd should be the application ID for the OneDrive application, and the password is the application key for the OneDrive application.

", "ProxyConfiguration$Credentials": "

Your secret ARN, which you can create in AWS Secrets Manager

The credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.

", "SalesforceConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of an Secrets Managersecret that contains the key/value pairs required to connect to your Salesforce instance. The secret must contain a JSON structure with the following keys:

", "ServiceNowConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of the Secrets Manager secret that contains the user name and password required to connect to the ServiceNow instance.

", - "SharePointConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Sever, you also need to provide the sever domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about AWS Secrets Manager, see What Is AWS Secrets Manager in the Secrets Manager user guide.

" + "SharePointConfiguration$SecretArn": "

The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. The credentials should be a user/password pair. If you use SharePoint Server, you also need to provide the server domain name as part of the credentials. For more information, see Using a Microsoft SharePoint Data Source. For more information about Secrets Manager, see What Is Secrets Manager in the Secrets Manager user guide.

" } }, "SecurityGroupIdList": { @@ -2284,7 +2295,7 @@ "StorageCapacityUnit": { "base": null, "refs": { - "CapacityUnitsConfiguration$StorageCapacityUnits": "

The amount of extra storage capacity for an index. A single capacity unit for an index provides 150 GB of storage space or 500,000 documents, whichever is reached first.

" + "CapacityUnitsConfiguration$StorageCapacityUnits": "

The amount of extra storage capacity for an index. A single capacity unit provides 30 GB of storage space or 100,000 documents, whichever is reached first.

" } }, "String": { @@ -2669,6 +2680,12 @@ "refs": { "SeedUrlConfiguration$WebCrawlerMode": "

You can choose one of the following modes:

The default mode is set to HOST_ONLY.

" } + }, + "WorkDocsConfiguration": { + "base": "

Provides the configuration information to connect to Amazon WorkDocs as your data source.

Amazon WorkDocs connector is available in Oregon, North Virginia, Sydney, Singapore and Ireland regions.

", + "refs": { + "DataSourceConfiguration$WorkDocsConfiguration": "

Provides the configuration information to connect to WorkDocs as your data source.

" + } } } } diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index 98a470ccb3..fdb414a096 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -226,7 +226,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] }, "DeleteLayerVersion":{ @@ -789,7 +790,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] }, "PutProvisionedConcurrencyConfig":{ @@ -980,7 +982,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"ResourceConflictException"} ] } }, diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 5236260190..4d101d3ab8 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -313,7 +313,7 @@ "refs": { "CreateEventSourceMappingRequest$StartingPositionTimestamp": "

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading.

", "EventSourceMappingConfiguration$StartingPositionTimestamp": "

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading.

", - "EventSourceMappingConfiguration$LastModified": "

The date that the event source mapping was last updated, or its state changed.

", + "EventSourceMappingConfiguration$LastModified": "

The date that the event source mapping was last updated or that its state changed.

", "FunctionEventInvokeConfig$LastModified": "

The date and time that the configuration was last updated.

" } }, @@ -522,7 +522,7 @@ } }, "EventSourceMappingConfiguration": { - "base": "

A mapping between an Amazon Web Services resource and an Lambda function. See CreateEventSourceMapping for details.

", + "base": "

A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.

", "refs": { "EventSourceMappingsList$member": null } @@ -537,7 +537,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.

", - "EventSourceMappingConfiguration$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is only supported for Amazon Kinesis streams.

" + "EventSourceMappingConfiguration$StartingPosition": "

The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams.

" } }, "EventSourceToken": { @@ -1226,7 +1226,7 @@ "refs": { "ListAliasesRequest$MaxItems": "

Limit the number of aliases returned.

", "ListCodeSigningConfigsRequest$MaxItems": "

Maximum number of items to return.

", - "ListEventSourceMappingsRequest$MaxItems": "

The maximum number of event source mappings to return.

", + "ListEventSourceMappingsRequest$MaxItems": "

The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.

", "ListFunctionsByCodeSigningConfigRequest$MaxItems": "

Maximum number of items to return.

", "ListFunctionsRequest$MaxItems": "

The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.

", "ListVersionsByFunctionRequest$MaxItems": "

The maximum number of versions to return. Note that ListVersionsByFunction returns a maximum of 50 items in each response, even if you set the number higher.

" @@ -1242,7 +1242,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.

", - "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

", + "EventSourceMappingConfiguration$MaximumBatchingWindowInSeconds": "

(Streams and Amazon SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

", "UpdateEventSourceMappingRequest$MaximumBatchingWindowInSeconds": "

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.

" } }, @@ -1350,7 +1350,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$ParallelizationFactor": "

(Streams only) The number of batches to process from each shard concurrently.

", - "EventSourceMappingConfiguration$ParallelizationFactor": "

(Streams only) The number of batches to process from each shard concurrently. The default value is 1.

", + "EventSourceMappingConfiguration$ParallelizationFactor": "

(Streams only) The number of batches to process concurrently from each shard. The default value is 1.

", "UpdateEventSourceMappingRequest$ParallelizationFactor": "

(Streams only) The number of batches to process from each shard concurrently.

" } }, @@ -1478,7 +1478,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$Queues": "

(MQ) The name of the Amazon MQ broker destination queue to consume.

", - "EventSourceMappingConfiguration$Queues": "

(MQ) The name of the Amazon MQ broker destination queue to consume.

" + "EventSourceMappingConfiguration$Queues": "

(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.

" } }, "RemoveLayerVersionPermissionRequest": { @@ -1587,10 +1587,10 @@ } }, "SelfManagedEventSource": { - "base": "

The Self-Managed Apache Kafka cluster for your event source.

", + "base": "

The self-managed Apache Kafka cluster for your event source.

", "refs": { "CreateEventSourceMappingRequest$SelfManagedEventSource": "

The Self-Managed Apache Kafka cluster to send records.

", - "EventSourceMappingConfiguration$SelfManagedEventSource": "

The Self-Managed Apache Kafka cluster for your event source.

" + "EventSourceMappingConfiguration$SelfManagedEventSource": "

The self-managed Apache Kafka cluster for your event source.

" } }, "SensitiveString": { @@ -1612,7 +1612,7 @@ } }, "SourceAccessConfiguration": { - "base": "

You can specify the authentication protocol, or the VPC components to secure access to your event source.

", + "base": "

To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.

", "refs": { "SourceAccessConfigurations$member": null } @@ -1620,15 +1620,15 @@ "SourceAccessConfigurations": { "base": null, "refs": { - "CreateEventSourceMappingRequest$SourceAccessConfigurations": "

An array of the authentication protocol, or the VPC components to secure your event source.

", - "EventSourceMappingConfiguration$SourceAccessConfigurations": "

An array of the authentication protocol, or the VPC components to secure your event source.

", - "UpdateEventSourceMappingRequest$SourceAccessConfigurations": "

An array of the authentication protocol, or the VPC components to secure your event source.

" + "CreateEventSourceMappingRequest$SourceAccessConfigurations": "

An array of authentication protocols or VPC components required to secure your event source.

", + "EventSourceMappingConfiguration$SourceAccessConfigurations": "

An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.

", + "UpdateEventSourceMappingRequest$SourceAccessConfigurations": "

An array of authentication protocols or VPC components required to secure your event source.

" } }, "SourceAccessType": { "base": null, "refs": { - "SourceAccessConfiguration$Type": "

The type of authentication protocol or the VPC components for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" + "SourceAccessConfiguration$Type": "

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" } }, "SourceOwner": { @@ -1698,9 +1698,9 @@ "ENILimitReachedException$Message": null, "EnvironmentError$ErrorCode": "

The error code.

", "EventSourceMappingConfiguration$UUID": "

The identifier of the event source mapping.

", - "EventSourceMappingConfiguration$LastProcessingResult": "

The result of the last Lambda invocation of your Lambda function.

", + "EventSourceMappingConfiguration$LastProcessingResult": "

The result of the last Lambda invocation of your function.

", "EventSourceMappingConfiguration$State": "

The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.

", - "EventSourceMappingConfiguration$StateTransitionReason": "

Indicates whether the last change to the event source mapping was made by a user, or by the Lambda service.

", + "EventSourceMappingConfiguration$StateTransitionReason": "

Indicates whether a user or Lambda made the last change to the event source mapping.

", "FunctionCode$ImageUri": "

URI of a container image in the Amazon ECR registry.

", "FunctionCodeLocation$RepositoryType": "

The service that's hosting the file.

", "FunctionCodeLocation$Location": "

A presigned URL that you can use to download the deployment package.

", @@ -1930,7 +1930,7 @@ "base": null, "refs": { "CreateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.

", - "EventSourceMappingConfiguration$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.

", + "EventSourceMappingConfiguration$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is 1–900 seconds.

", "UpdateEventSourceMappingRequest$TumblingWindowInSeconds": "

(Streams only) The duration in seconds of a processing window. The range is between 1 second up to 900 seconds.

" } }, diff --git a/models/apis/personalize/2018-05-22/api-2.json b/models/apis/personalize/2018-05-22/api-2.json index 64cb86a861..a7a00ada83 100644 --- a/models/apis/personalize/2018-05-22/api-2.json +++ b/models/apis/personalize/2018-05-22/api-2.json @@ -923,8 +923,7 @@ "type":"structure", "required":[ "name", - "solutionVersionArn", - "minProvisionedTPS" + "solutionVersionArn" ], "members":{ "name":{"shape":"Name"}, diff --git a/models/apis/personalize/2018-05-22/docs-2.json b/models/apis/personalize/2018-05-22/docs-2.json index cf5bd55112..cfccb2d5f7 100644 --- a/models/apis/personalize/2018-05-22/docs-2.json +++ b/models/apis/personalize/2018-05-22/docs-2.json @@ -5,9 +5,9 @@ "CreateBatchInferenceJob": "

Creates a batch inference job. The operation can handle up to 50 million records and the input file must be in JSON format. For more information, see recommendations-batch.

", "CreateCampaign": "

Creates a campaign by deploying a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge.

If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions.

The actual TPS used is calculated as the average requests/second within a 5-minute window. You pay for maximum of either the minimum provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", "CreateDataset": "

Creates an empty dataset and adds it to the specified dataset group. Use CreateDatasetImportJob to import your training data to a dataset.

There are three types of datasets:

Each dataset type has an associated schema with required field types. Only the Interactions dataset is required in order to train a model (also referred to as creating a solution).

A dataset can be in one of the following states:

To get the status of the dataset, call DescribeDataset.

Related APIs

", - "CreateDatasetExportJob": "

Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify an service-linked AWS Identity and Access Management (IAM) role that gives Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.

Status

A dataset export job can be in one of the following states:

To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

", - "CreateDatasetGroup": "

Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:

To train a model (create a solution), a dataset group that contains an Interactions dataset is required. Call CreateDataset to add a dataset to the group.

A dataset group can be in one of the following states:

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an AWS Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

", - "CreateDatasetImportJob": "

Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an AWS Identity and Access Management (IAM) service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it in an internal AWS system. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.

The dataset import job replaces any existing data in the dataset that you imported in bulk.

Status

A dataset import job can be in one of the following states:

To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.

Related APIs

", + "CreateDatasetExportJob": "

Creates a job that exports data from your dataset to an Amazon S3 bucket. To allow Amazon Personalize to export the training data, you must specify a service-linked IAM role that gives Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, see Exporting a dataset in the Amazon Personalize developer guide.

Status

A dataset export job can be in one of the following states:

To get the status of the export job, call DescribeDatasetExportJob, and specify the Amazon Resource Name (ARN) of the dataset export job. The dataset export is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

", + "CreateDatasetGroup": "

Creates an empty dataset group. A dataset group contains related datasets that supply data for training a model. A dataset group can contain at most three datasets, one for each type of dataset:

To train a model (create a solution), a dataset group that contains an Interactions dataset is required. Call CreateDataset to add a dataset to the group.

A dataset group can be in one of the following states:

To get the status of the dataset group, call DescribeDatasetGroup. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the creation failed.

You must wait until the status of the dataset group is ACTIVE before adding a dataset to the group.

You can specify a Key Management Service (KMS) key to encrypt the datasets in the group. If you specify a KMS key, you must also include an Identity and Access Management (IAM) role that has permission to access the key.

APIs that require a dataset group ARN in the request

Related APIs

", + "CreateDatasetImportJob": "

Creates a job that imports training data from your data source (an Amazon S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize to import the training data, you must specify an IAM service role that has permission to read from the data source, as Amazon Personalize makes a copy of your data and processes it internally. For information on granting access to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon S3 Resources.

The dataset import job replaces any existing data in the dataset that you imported in bulk.

Status

A dataset import job can be in one of the following states:

To get the status of the import job, call DescribeDatasetImportJob, providing the Amazon Resource Name (ARN) of the dataset import job. The dataset import is complete when the status shows as ACTIVE. If the status shows as CREATE FAILED, the response includes a failureReason key, which describes why the job failed.

Importing takes time. You must wait until the status shows as ACTIVE before training a model using the dataset.

Related APIs

", "CreateEventTracker": "

Creates an event tracker that you use when adding event data to a specified dataset group using the PutEvents API.

Only one event tracker can be associated with a dataset group. You will get an error if you call CreateEventTracker using the same dataset group as an existing event tracker.

When you create an event tracker, the response includes a tracking ID, which you pass as a parameter when you use the PutEvents operation. Amazon Personalize then appends the event data to the Interactions dataset of the dataset group you specify in your event tracker.

The event tracker can be in one of the following states:

To get the status of the event tracker, call DescribeEventTracker.

The event tracker must be in the ACTIVE state before using the tracking ID.

Related APIs

", "CreateFilter": "

Creates a recommendation filter. For more information, see filter.

", "CreateSchema": "

Creates an Amazon Personalize schema from the specified schema string. The schema you create must be in Avro JSON format.

Amazon Personalize recognizes three schema variants. Each schema is associated with a dataset type and has a set of required field and keywords. You specify a schema when you call CreateDataset.

Related APIs

", @@ -54,7 +54,7 @@ "AccountId": { "base": null, "refs": { - "EventTracker$accountId": "

The Amazon AWS account that owns the event tracker.

" + "EventTracker$accountId": "

The Amazon Web Services account that owns the event tracker.

" } }, "Algorithm": { @@ -113,13 +113,13 @@ "Dataset$schemaArn": "

The ARN of the associated schema.

", "DatasetExportJob$datasetExportJobArn": "

The Amazon Resource Name (ARN) of the dataset export job.

", "DatasetExportJob$datasetArn": "

The Amazon Resource Name (ARN) of the dataset to export.

", - "DatasetExportJob$roleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.

", + "DatasetExportJob$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.

", "DatasetExportJobSummary$datasetExportJobArn": "

The Amazon Resource Name (ARN) of the dataset export job.

", "DatasetGroup$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group.

", "DatasetGroupSummary$datasetGroupArn": "

The Amazon Resource Name (ARN) of the dataset group.

", "DatasetImportJob$datasetImportJobArn": "

The ARN of the dataset import job.

", "DatasetImportJob$datasetArn": "

The Amazon Resource Name (ARN) of the dataset that receives the imported data.

", - "DatasetImportJob$roleArn": "

The ARN of the AWS Identity and Access Management (IAM) role that has permissions to read from the Amazon S3 data source.

", + "DatasetImportJob$roleArn": "

The ARN of the IAM role that has permissions to read from the Amazon S3 data source.

", "DatasetImportJobSummary$datasetImportJobArn": "

The Amazon Resource Name (ARN) of the dataset import job.

", "DatasetSchema$schemaArn": "

The Amazon Resource Name (ARN) of the schema.

", "DatasetSchemaSummary$schemaArn": "

The Amazon Resource Name (ARN) of the schema.

", @@ -486,7 +486,7 @@ } }, "DatasetGroup": { - "base": "

A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.

You can specify an AWS Key Management Service (KMS) key to encrypt the datasets in the group.

", + "base": "

A dataset group is a collection of related datasets (Interactions, User, and Item). You create a dataset group by calling CreateDatasetGroup. You then create a dataset and add it to a dataset group by calling CreateDataset. The dataset group is used to create and train a solution by calling CreateSolution. A dataset group can contain only one of each type of dataset.

You can specify an Key Management Service (KMS) key to encrypt the datasets in the group.

", "refs": { "DescribeDatasetGroupResponse$datasetGroup": "

A listing of the dataset group's properties.

" } @@ -1056,9 +1056,9 @@ "KmsKeyArn": { "base": null, "refs": { - "CreateDatasetGroupRequest$kmsKeyArn": "

The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets.

", - "DatasetGroup$kmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets.

", - "S3DataConfig$kmsKeyArn": "

The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.

" + "CreateDatasetGroupRequest$kmsKeyArn": "

The Amazon Resource Name (ARN) of a Key Management Service (KMS) key used to encrypt the datasets.

", + "DatasetGroup$kmsKeyArn": "

The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used to encrypt the datasets.

", + "S3DataConfig$kmsKeyArn": "

The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to encrypt or decrypt the input and output files of a batch inference job.

" } }, "LimitExceededException": { @@ -1412,8 +1412,8 @@ "refs": { "BatchInferenceJob$roleArn": "

The ARN of the Amazon Identity and Access Management (IAM) role that requested the batch inference job.

", "CreateBatchInferenceJobRequest$roleArn": "

The ARN of the Amazon Identity and Access Management role that has permissions to read and write to your input and output Amazon S3 buckets respectively.

", - "CreateDatasetExportJobRequest$roleArn": "

The Amazon Resource Name (ARN) of the AWS Identity and Access Management service role that has permissions to add data to your output Amazon S3 bucket.

", - "CreateDatasetGroupRequest$roleArn": "

The ARN of the IAM role that has permissions to access the KMS key. Supplying an IAM role is only valid when also specifying a KMS key.

", + "CreateDatasetExportJobRequest$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that has permissions to add data to your output Amazon S3 bucket.

", + "CreateDatasetGroupRequest$roleArn": "

The ARN of the Identity and Access Management (IAM) role that has permissions to access the Key Management Service (KMS) key. Supplying an IAM role is only valid when also specifying a KMS key.

", "CreateDatasetImportJobRequest$roleArn": "

The ARN of the IAM role that has permissions to read from the Amazon S3 data source.

", "DatasetGroup$roleArn": "

The ARN of the IAM role that has permissions to create the dataset group.

" } diff --git a/models/apis/proton/2020-07-20/docs-2.json b/models/apis/proton/2020-07-20/docs-2.json index 6c184eb231..2268419c06 100644 --- a/models/apis/proton/2020-07-20/docs-2.json +++ b/models/apis/proton/2020-07-20/docs-2.json @@ -1,20 +1,20 @@ { "version": "2.0", - "service": "

This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.

The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about AWS Proton administration, see the AWS Proton Administration Guide.

To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

 <p> <b>Idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.</p> <p>If you retry and the resource doesn't exist, the response is empty.</p> <p>In both cases, the retry succeeds.</p> <p>Idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironmentTemplate</p> </li> <li> <p>DeleteEnvironmentTemplateVersion</p> </li> <li> <p>DeleteServiceTemplate</p> </li> <li> <p>DeleteServiceTemplateVersion</p> </li> <li> <p>DeleteEnvironmentAccountConnection</p> </li> </ul> <p> <b>Asynchronous idempotent delete APIs</b> </p> <p>Given a request action that has succeeded:</p> <p>If you retry the request with an API from this group, if the original request delete operation status is <code>DELETE_IN_PROGRESS</code>, the retry returns the resource detail data in the response without performing any further actions.</p> <p>If the original request delete operation is complete, a retry returns an empty response.</p> <p>Asynchronous idempotent delete APIs:</p> <ul> <li> <p>DeleteEnvironment</p> </li> <li> <p>DeleteService</p> </li> </ul> 
", + "service": "

This is the AWS Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the AWS Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the AWS CLI to access an API. For more information, see the AWS Command Line Interface User Guide.

The AWS Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that AWS Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, AWS Proton deploys their applications. To monitor their applications that are running on AWS Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about AWS Proton administration, see the AWS Proton Administrator Guide.

To learn more about deploying serverless and containerized applications on AWS Proton, see the AWS Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding AWS CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

", "operations": { - "AcceptEnvironmentAccountConnection": "

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the AWS Proton Administration guide.

", - "CancelEnvironmentDeployment": "

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the AWS Proton Administration guide.

The following list includes potential cancellation scenarios.

", - "CancelServiceInstanceDeployment": "

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the AWS Proton Administration guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

", - "CancelServicePipelineDeployment": "

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the AWS Proton Administration guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

", - "CreateEnvironment": "

Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administration Guide.

", - "CreateEnvironmentAccountConnection": "

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from the management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administration guide.

", - "CreateEnvironmentTemplate": "

Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administration Guide.

You can create an environment template in one of the two following ways:

", + "AcceptEnvironmentAccountConnection": "

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, AWS Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "CancelEnvironmentDeployment": "

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the AWS Proton Administrator guide.

The following list includes potential cancellation scenarios.

", + "CancelServiceInstanceDeployment": "

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

", + "CancelServicePipelineDeployment": "

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the AWS Proton Administrator guide or the AWS Proton User guide.

The following list includes potential cancellation scenarios.

", + "CreateEnvironment": "

Deploy a new environment. An AWS Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services. For more information, see the Environments in the AWS Proton Administrator Guide.

", + "CreateEnvironmentAccountConnection": "

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "CreateEnvironmentTemplate": "

Create an environment template for AWS Proton. For more information, see Environment Templates in the AWS Proton Administrator Guide.

You can create an environment template in one of the two following ways:

", "CreateEnvironmentTemplateVersion": "

Create a new major or minor version of an environment template. A major version of an environment template is a version that isn't backwards compatible. A minor version of an environment template is a version that's backwards compatible within its major version.

", - "CreateService": "

Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administration Guide and Services in the AWS Proton User Guide.

", - "CreateServiceTemplate": "

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administration Guide.

", + "CreateService": "

Create an AWS Proton service. An AWS Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the AWS Proton Administrator Guide and Services in the AWS Proton User Guide.

", + "CreateServiceTemplate": "

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CICD service pipeline. Developers, in turn, select the service template from AWS Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. AWS Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the AWS Proton Administrator Guide.

", "CreateServiceTemplateVersion": "

Create a new major or minor version of a service template. A major version of a service template is a version that isn't backwards compatible. A minor version of a service template is a version that's backwards compatible within its major version.

", "DeleteEnvironment": "

Delete an environment.

", - "DeleteEnvironmentAccountConnection": "

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "DeleteEnvironmentAccountConnection": "

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an AWS Proton environment, AWS Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "DeleteEnvironmentTemplate": "

If no other major or minor versions of an environment template exist, delete the environment template.

", "DeleteEnvironmentTemplateVersion": "

If no other minor versions of an environment template exist, delete a major version of the environment template if it's not the Recommended version. Delete the Recommended version of the environment template if no other major versions or minor versions of the environment template exist. A major version of an environment template is a version that's not backwards compatible.

Delete a minor version of an environment template if it isn't the Recommended version. Delete a Recommended minor version of the environment template if no other minor versions of the environment template exist. A minor version of an environment template is a version that's backwards compatible.

", "DeleteService": "

Delete a service.

", @@ -22,14 +22,14 @@ "DeleteServiceTemplateVersion": "

If no other minor versions of a service template exist, delete a major version of the service template if it's not the Recommended version. Delete the Recommended version of the service template if no other major versions or minor versions of the service template exist. A major version of a service template is a version that isn't backwards compatible.

Delete a minor version of a service template if it's not the Recommended version. Delete a Recommended minor version of the service template if no other minor versions of the service template exist. A minor version of a service template is a version that's backwards compatible.

", "GetAccountSettings": "

Get detail data for the AWS Proton pipeline service role.

", "GetEnvironment": "

Get detail data for an environment.

", - "GetEnvironmentAccountConnection": "

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "GetEnvironmentAccountConnection": "

In an environment account, view the detail data for an environment account connection.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "GetEnvironmentTemplate": "

Get detail data for an environment template.

", "GetEnvironmentTemplateVersion": "

View detail data for a major or minor version of an environment template.

", "GetService": "

Get detail data for a service.

", "GetServiceInstance": "

Get detail data for a service instance. A service instance is an instantiation of service template, which is running in a specific environment.

", "GetServiceTemplate": "

Get detail data for a service template.

", "GetServiceTemplateVersion": "

View detail data for a major or minor version of a service template.

", - "ListEnvironmentAccountConnections": "

View a list of environment account connections.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "ListEnvironmentAccountConnections": "

View a list of environment account connections.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "ListEnvironmentTemplateVersions": "

List major or minor versions of an environment template with detail data.

", "ListEnvironmentTemplates": "

List environment templates.

", "ListEnvironments": "

List environments with detail data summaries.

", @@ -37,13 +37,13 @@ "ListServiceTemplateVersions": "

List major or minor versions of a service template with detail data.

", "ListServiceTemplates": "

List service templates with detail data.

", "ListServices": "

List services with summaries of detail data.

", - "ListTagsForResource": "

List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", - "RejectEnvironmentAccountConnection": "

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the AWS Proton Administration guide.

", - "TagResource": "

Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", - "UntagResource": "

Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", + "ListTagsForResource": "

List tags for a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "RejectEnvironmentAccountConnection": "

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you won’t be able to accept or use the rejected environment account connection.

You can’t reject an environment account connection that is connected to an environment.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", + "TagResource": "

Tag a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "UntagResource": "

Remove a tag from a resource. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", "UpdateAccountSettings": "

Update the AWS Proton pipeline service account settings.

", - "UpdateEnvironment": "

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

", - "UpdateEnvironmentAccountConnection": "

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the AWS Proton Administration guide.

", + "UpdateEnvironment": "

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter to update or connect to an environment account connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

There are four modes for updating an environment as described in the following. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version (optional).

", + "UpdateEnvironmentAccountConnection": "

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the AWS Proton Administrator guide.

", "UpdateEnvironmentTemplate": "

Update an environment template.

", "UpdateEnvironmentTemplateVersion": "

Update a major or minor version of an environment template.

", "UpdateService": "

Edit a service description or use a spec to add and delete service instances.

Existing service instances and the service pipeline can't be edited using this API. They can only be deleted.

Use the description parameter to modify the description.

Edit the spec parameter to add or delete instances.

", @@ -82,22 +82,22 @@ "CreateEnvironmentAccountConnectionInput$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that's created in the environment account. AWS Proton uses this role to provision infrastructure resources in the associated environment account.

", "CreateEnvironmentInput$protonServiceRoleArn": "

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value.

", "CreateEnvironmentTemplateInput$encryptionKey": "

A customer provided encryption key that AWS Proton uses to encrypt data.

", - "CreateServiceInput$repositoryConnectionArn": "

The ARN of the repository connection. For more information, see Set up repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

", + "CreateServiceInput$repositoryConnectionArn": "

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

", "CreateServiceTemplateInput$encryptionKey": "

A customer provided encryption key that's used to encrypt data.

", - "Environment$protonServiceRoleArn": "

The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

", + "Environment$protonServiceRoleArn": "

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

", "EnvironmentAccountConnection$roleArn": "

The IAM service role that's associated with the environment account connection.

", "EnvironmentAccountConnectionSummary$roleArn": "

The IAM service role that's associated with the environment account connection.

", - "EnvironmentSummary$protonServiceRoleArn": "

The ARN of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

", + "EnvironmentSummary$protonServiceRoleArn": "

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make calls to other services on your behalf.

", "EnvironmentTemplate$encryptionKey": "

The customer provided encryption key for the environment template.

", - "ListTagsForResourceInput$resourceArn": "

The ARN of the resource for the listed tags.

", - "Service$repositoryConnectionArn": "

The ARN of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administration Guide and Getting started in the AWS Proton User Guide.

", + "ListTagsForResourceInput$resourceArn": "

The Amazon Resource Name (ARN) of the resource for the listed tags.

", + "Service$repositoryConnectionArn": "

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the AWS Proton Administrator Guide and Setting up with AWS Proton in the AWS Proton User Guide.

", "ServicePipeline$arn": "

The Amazon Resource Name (ARN) of the service pipeline.

", "ServiceTemplate$encryptionKey": "

The customer provided service template encryption key that's used to encrypt data.

", "TagResourceInput$resourceArn": "

The Amazon Resource Name (ARN) of the resource that the resource tag is applied to.

", "UntagResourceInput$resourceArn": "

The Amazon Resource Name (ARN) of the resource that the tag is to be removed from.

", "UpdateAccountSettingsInput$pipelineServiceRoleArn": "

The Amazon Resource Name (ARN) of the AWS Proton pipeline service role.

", "UpdateEnvironmentAccountConnectionInput$roleArn": "

The Amazon Resource Name (ARN) of the IAM service role that is associated with the environment account connection to update.

", - "UpdateEnvironmentInput$protonServiceRoleArn": "

The ARN of the AWS Proton service role that allows AWS Proton to make API calls to other services your behalf.

" + "UpdateEnvironmentInput$protonServiceRoleArn": "

The Amazon Resource Name (ARN) of the AWS Proton service role that allows AWS Proton to make API calls to other services on your behalf.

" } }, "AwsAccountId": { @@ -412,7 +412,7 @@ "base": null, "refs": { "AcceptEnvironmentAccountConnectionInput$id": "

The ID of the environment account connection.

", - "CreateEnvironmentInput$environmentAccountConnectionId": "

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. For more information, see Environment account connections in the AWS Proton Administration guide.

", + "CreateEnvironmentInput$environmentAccountConnectionId": "

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. You must include either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. For more information, see Environment account connections in the AWS Proton Administrator guide.

", "DeleteEnvironmentAccountConnectionInput$id": "

The ID of the environment account connection to delete.

", "Environment$environmentAccountConnectionId": "

The ID of the environment account connection that's used to provision infrastructure resources in an environment account.

", "EnvironmentAccountConnection$id": "

The ID of the environment account connection.

", @@ -421,7 +421,7 @@ "GetEnvironmentAccountConnectionInput$id": "

The ID of the environment account connection.

", "RejectEnvironmentAccountConnectionInput$id": "

The ID of the environment account connection to reject.

", "UpdateEnvironmentAccountConnectionInput$id": "

The ID of the environment account connection to update.

", - "UpdateEnvironmentInput$environmentAccountConnectionId": "

The ID of the environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and associated with the current environment.

" + "UpdateEnvironmentInput$environmentAccountConnectionId": "

The ID of the environment account connection.

You can only update to a new environment account connection if it was created in the same environment account that the current environment account connection was created in and is associated with the current environment.

" } }, "EnvironmentAccountConnectionRequesterAccountType": { @@ -795,7 +795,7 @@ "base": null, "refs": { "CreateEnvironmentTemplateInput$provisioning": "

When included, indicates that the environment template is for customer provisioned and managed infrastructure.

", - "CreateServiceTemplateInput$pipelineProvisioning": "

AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administration Guide.

", + "CreateServiceTemplateInput$pipelineProvisioning": "

AWS Proton includes a service pipeline for your service by default. When included, this parameter indicates that an AWS Proton service pipeline won't be included for your service. Once specified, this parameter can't be changed. For more information, see Service template bundles in the AWS Proton Administrator Guide.

", "Environment$provisioning": "

When included, indicates that the environment template is for customer provisioned and managed infrastructure.

", "EnvironmentSummary$provisioning": "

When included, indicates that the environment template is for customer provisioned and managed infrastructure.

", "EnvironmentTemplate$provisioning": "

When included, indicates that the environment template is for customer provisioned and managed infrastructure.

", @@ -832,7 +832,7 @@ "CompatibleEnvironmentTemplateInput$templateName": "

The compatible environment template name.

", "CreateEnvironmentAccountConnectionInput$environmentName": "

The name of the AWS Proton environment that's created in the associated management account.

", "CreateEnvironmentInput$name": "

The name of the environment.

", - "CreateEnvironmentInput$templateName": "

The name of the environment template. For more information, see Environment Templates in the AWS Proton Administration Guide.

", + "CreateEnvironmentInput$templateName": "

The name of the environment template. For more information, see Environment Templates in the AWS Proton Administrator Guide.

", "CreateEnvironmentTemplateInput$name": "

The name of the environment template.

", "CreateEnvironmentTemplateVersionInput$templateName": "

The name of the environment template.

", "CreateServiceInput$name": "

The service name.

", @@ -846,7 +846,7 @@ "DeleteServiceTemplateInput$name": "

The name of the service template to delete.

", "DeleteServiceTemplateVersionInput$templateName": "

The name of the service template.

", "Environment$name": "

The name of the environment.

", - "Environment$templateName": "

The ARN of the environment template.

", + "Environment$templateName": "

The Amazon Resource Name (ARN) of the environment template.

", "EnvironmentAccountConnection$environmentName": "

The name of the environment that's associated with the environment account connection.

", "EnvironmentAccountConnectionSummary$environmentName": "

The name of the environment that's associated with the environment account connection.

", "EnvironmentSummary$name": "

The name of the environment.

", @@ -971,7 +971,7 @@ } }, "ServiceQuotaExceededException": { - "base": "

A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administration Guide.

", + "base": "

A quota was exceeded. For more information, see AWS Proton Quotas in the AWS Proton Administrator Guide.

", "refs": { } }, @@ -1053,14 +1053,14 @@ "SpecContents": { "base": null, "refs": { - "CreateEnvironmentInput$spec": "

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administration Guide.

", - "CreateServiceInput$spec": "

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administration Guide and Create a service in the AWS Proton User Guide.

", + "CreateEnvironmentInput$spec": "

A link to a YAML formatted spec file that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the AWS Proton Administrator Guide.

", + "CreateServiceInput$spec": "

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the AWS Proton Administrator Guide and Create a service in the AWS Proton User Guide.

", "Environment$spec": "

The environment spec.

", "Service$spec": "

The formatted specification that defines the service.

", "ServiceInstance$spec": "

The service spec that was used to create the service instance.

", "ServicePipeline$spec": "

The service spec that was used to create the service pipeline.

", "UpdateEnvironmentInput$spec": "

The formatted specification that defines the update.

", - "UpdateServiceInput$spec": "

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administration Guide or the AWS Proton User Guide.

", + "UpdateServiceInput$spec": "

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the AWS Proton Administrator Guide or the AWS Proton User Guide.

", "UpdateServiceInstanceInput$spec": "

The formatted specification that defines the service instance update.

", "UpdateServicePipelineInput$spec": "

The spec for the service pipeline to update.

" } @@ -1110,11 +1110,11 @@ "TagList": { "base": null, "refs": { - "CreateEnvironmentInput$tags": "

Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", - "CreateEnvironmentTemplateInput$tags": "

Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", + "CreateEnvironmentInput$tags": "

Create tags for your environment. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "CreateEnvironmentTemplateInput$tags": "

Create tags for your environment template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", "CreateEnvironmentTemplateVersionInput$tags": "

Create tags for a new version of an environment template.

", - "CreateServiceInput$tags": "

Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", - "CreateServiceTemplateInput$tags": "

Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administration Guide or AWS Proton User Guide.

", + "CreateServiceInput$tags": "

Create tags for your service. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", + "CreateServiceTemplateInput$tags": "

Create tags for your service template. For more information, see AWS Proton resources and tagging in the AWS Proton Administrator Guide or AWS Proton User Guide.

", "CreateServiceTemplateVersionInput$tags": "

Create tags for a new version of a service template.

", "ListTagsForResourceOutput$tags": "

An array of resource tags with detail data.

", "TagResourceInput$tags": "

An array of resource tags to apply to a resource.

" @@ -1150,7 +1150,7 @@ "CompatibleEnvironmentTemplateInput$majorVersion": "

The major version of the compatible environment template.

", "CreateEnvironmentInput$templateMajorVersion": "

The ID of the major version of the environment template.

", "CreateEnvironmentInput$templateMinorVersion": "

The ID of the minor version of the environment template.

", - "CreateEnvironmentTemplateVersionInput$majorVersion": "

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

", + "CreateEnvironmentTemplateVersionInput$majorVersion": "

To create a new minor version of the environment template, include a majorVersion.

To create a new major and minor version of the environment template, exclude majorVersion.

", "CreateServiceInput$templateMajorVersion": "

The ID of the major version of the service template that was used to create the service.

", "CreateServiceInput$templateMinorVersion": "

The ID of the minor version of the service template that was used to create the service.

", "CreateServiceTemplateVersionInput$majorVersion": "

To create a new minor version of the service template, include a majorVersion.

To create a new major and minor version of the service template, exclude majorVersion.

", diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index e2cba301b6..5b1238c4c1 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -4483,7 +4483,8 @@ "IAMDatabaseAuthenticationEnabled":{"shape":"Boolean"}, "ProcessorFeatures":{"shape":"ProcessorFeatureList"}, "DbiResourceId":{"shape":"String"}, - "TagList":{"shape":"TagList"} + "TagList":{"shape":"TagList"}, + "OriginalSnapshotCreateTime":{"shape":"TStamp"} }, "wrapper":true }, diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 348f54f43f..1094dcf18b 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -2215,7 +2215,7 @@ "DescribeDBClusterParameterGroupsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBClusterParametersMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBClusterSnapshotsMessage$Filters": "

A filter that specifies one or more DB cluster snapshots to describe.

Supported filters:

", - "DescribeDBClustersMessage$Filters": "

A filter that specifies one or more DB clusters to describe.

Supported filters:

", + "DescribeDBClustersMessage$Filters": "

A filter that specifies one or more DB clusters to describe.

Supported filters:

", "DescribeDBEngineVersionsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBInstanceAutomatedBackupsMessage$Filters": "

A filter that specifies which resources to return based on status.

Supported filters are the following:

Returns all resources by default. The status for each resource is specified in the response.

", "DescribeDBInstancesMessage$Filters": "

A filter that specifies one or more DB instances to describe.

Supported filters:

", @@ -2235,7 +2235,7 @@ "DescribeEventSubscriptionsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeEventsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeExportTasksMessage$Filters": "

Filters specify one or more snapshot exports to describe. The filters are specified as name-value pairs that define what to include in the output. Filter names and values are case-sensitive.

Supported filters include the following:

", - "DescribeGlobalClustersMessage$Filters": "

A filter that specifies one or more global DB clusters to describe.

Supported filters:

", + "DescribeGlobalClustersMessage$Filters": "

This parameter isn't currently supported.

", "DescribeInstallationMediaMessage$Filters": "

A filter that specifies one or more installation media to describe. Supported filters include the following:

", "DescribeOptionGroupOptionsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeOptionGroupsMessage$Filters": "

This parameter isn't currently supported.

", @@ -3965,7 +3965,7 @@ "DBSnapshot$SnapshotType": "

Provides the type of the DB snapshot.

", "DBSnapshot$OptionGroupName": "

Provides the option group name for the DB snapshot.

", "DBSnapshot$SourceRegion": "

The Amazon Web Services Region that the DB snapshot was created in or copied from.

", - "DBSnapshot$SourceDBSnapshotIdentifier": "

The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has value in case of cross-customer or cross-region copy.

", + "DBSnapshot$SourceDBSnapshotIdentifier": "

The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied from. It only has a value in the case of a cross-account or cross-Region copy.

", "DBSnapshot$StorageType": "

Specifies the storage type associated with DB snapshot.

", "DBSnapshot$TdeCredentialArn": "

The ARN from the key store with which to associate the instance for TDE encryption.

", "DBSnapshot$KmsKeyId": "

If Encrypted is true, the Amazon Web Services KMS key identifier for the encrypted DB snapshot.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the Amazon Web Services KMS customer master key (CMK).

", @@ -4633,8 +4633,9 @@ "DBProxyEndpoint$CreatedDate": "

The date and time when the DB proxy endpoint was first created.

", "DBProxyTargetGroup$CreatedDate": "

The date and time when the target group was first created.

", "DBProxyTargetGroup$UpdatedDate": "

The date and time when the target group was last updated.

", - "DBSnapshot$SnapshotCreateTime": "

Specifies when the snapshot was taken in Coordinated Universal Time (UTC).

", + "DBSnapshot$SnapshotCreateTime": "

Specifies when the snapshot was taken in Coordinated Universal Time (UTC). This value changes for the copy when the snapshot is copied.

", "DBSnapshot$InstanceCreateTime": "

Specifies the time in Coordinated Universal Time (UTC) when the DB instance, from which the snapshot was taken, was created.

", + "DBSnapshot$OriginalSnapshotCreateTime": "

Specifies the time of the CreateDBSnapshot operation in Coordinated Universal Time (UTC). Doesn't change when the snapshot is copied.

", "DescribeEventsMessage$StartTime": "

The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

", "DescribeEventsMessage$EndTime": "

The end of the time interval for which to retrieve events, specified in ISO 8601 format. For more information about ISO 8601, go to the ISO8601 Wikipedia page.

Example: 2009-07-08T18:00Z

", "Event$Date": "

Specifies the date and time of the event.

", diff --git a/service/codebuild/api.go b/service/codebuild/api.go index cb2fd11f24..c8c16b8606 100644 --- a/service/codebuild/api.go +++ b/service/codebuild/api.go @@ -545,12 +545,11 @@ func (c *CodeBuild) CreateProjectRequest(input *CreateProjectInput) (req *reques // The input value that was provided is not valid. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateProject func (c *CodeBuild) CreateProject(input *CreateProjectInput) (*CreateProjectOutput, error) { @@ -632,12 +631,11 @@ func (c *CodeBuild) CreateReportGroupRequest(input *CreateReportGroupInput) (req // The input value that was provided is not valid. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateReportGroup func (c *CodeBuild) CreateReportGroup(input *CreateReportGroupInput) (*CreateReportGroupOutput, error) { @@ -705,17 +703,18 @@ func (c *CodeBuild) CreateWebhookRequest(input *CreateWebhookInput) (req *reques // CreateWebhook API operation for AWS CodeBuild. // -// For an existing CodeBuild build project that has its source code stored in -// a GitHub or Bitbucket repository, enables CodeBuild to start rebuilding the -// source code every time a code change is pushed to the repository. +// For an existing AWS CodeBuild build project that has its source code stored +// in a GitHub or Bitbucket repository, enables AWS CodeBuild to start rebuilding +// the source code every time a code change is pushed to the repository. // -// If you enable webhooks for an CodeBuild project, and the project is used -// as a build step in CodePipeline, then two identical builds are created for -// each commit. One build is triggered through webhooks, and one through CodePipeline. -// Because billing is on a per-build basis, you are billed for both builds. -// Therefore, if you are using CodePipeline, we recommend that you disable webhooks -// in CodeBuild. In the CodeBuild console, clear the Webhook box. For more information, -// see step 5 in Change a Build Project's Settings (https://docs.aws.amazon.com/codebuild/latest/userguide/change-project.html#change-project-console). +// If you enable webhooks for an AWS CodeBuild project, and the project is used +// as a build step in AWS CodePipeline, then two identical builds are created +// for each commit. One build is triggered through webhooks, and one through +// AWS CodePipeline. Because billing is on a per-build basis, you are billed +// for both builds. Therefore, if you are using AWS CodePipeline, we recommend +// that you disable webhooks in AWS CodeBuild. In the AWS CodeBuild console, +// clear the Webhook box. 
For more information, see step 5 in Change a Build +// Project's Settings (https://docs.aws.amazon.com/codebuild/latest/userguide/change-project.html#change-project-console). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -732,11 +731,11 @@ func (c *CodeBuild) CreateWebhookRequest(input *CreateWebhookInput) (req *reques // There was a problem with the underlying OAuth provider. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/CreateWebhook func (c *CodeBuild) CreateWebhook(input *CreateWebhookInput) (*CreateWebhookOutput, error) { @@ -1218,7 +1217,7 @@ func (c *CodeBuild) DeleteSourceCredentialsRequest(input *DeleteSourceCredential // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DeleteSourceCredentials func (c *CodeBuild) DeleteSourceCredentials(input *DeleteSourceCredentialsInput) (*DeleteSourceCredentialsOutput, error) { @@ -1287,9 +1286,9 @@ func (c *CodeBuild) DeleteWebhookRequest(input *DeleteWebhookInput) (req *reques // DeleteWebhook API operation for AWS CodeBuild. 
// -// For an existing CodeBuild build project that has its source code stored in -// a GitHub or Bitbucket repository, stops CodeBuild from rebuilding the source -// code every time a code change is pushed to the repository. +// For an existing AWS CodeBuild build project that has its source code stored +// in a GitHub or Bitbucket repository, stops AWS CodeBuild from rebuilding +// the source code every time a code change is pushed to the repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1303,7 +1302,7 @@ func (c *CodeBuild) DeleteWebhookRequest(input *DeleteWebhookInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * OAuthProviderException // There was a problem with the underlying OAuth provider. @@ -1531,7 +1530,7 @@ func (c *CodeBuild) DescribeTestCasesRequest(input *DescribeTestCasesInput) (req // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DescribeTestCases func (c *CodeBuild) DescribeTestCases(input *DescribeTestCasesInput) (*DescribeTestCasesOutput, error) { @@ -1665,7 +1664,7 @@ func (c *CodeBuild) GetReportGroupTrendRequest(input *GetReportGroupTrendInput) // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/GetReportGroupTrend func (c *CodeBuild) GetReportGroupTrend(input *GetReportGroupTrendInput) (*GetReportGroupTrendOutput, error) { @@ -1744,7 +1743,7 @@ func (c *CodeBuild) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req // // Returned Error Types: // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * InvalidInputException // The input value that was provided is not valid. @@ -1815,8 +1814,8 @@ func (c *CodeBuild) ImportSourceCredentialsRequest(input *ImportSourceCredential // ImportSourceCredentials API operation for AWS CodeBuild. // -// Imports the source repository credentials for an CodeBuild project that has -// its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. +// Imports the source repository credentials for an AWS CodeBuild project that +// has its source code stored in a GitHub, GitHub Enterprise, or Bitbucket repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1830,12 +1829,11 @@ func (c *CodeBuild) ImportSourceCredentialsRequest(input *ImportSourceCredential // The input value that was provided is not valid. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // * ResourceAlreadyExistsException -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ImportSourceCredentials func (c *CodeBuild) ImportSourceCredentials(input *ImportSourceCredentialsInput) (*ImportSourceCredentialsOutput, error) { @@ -1918,7 +1916,7 @@ func (c *CodeBuild) InvalidateProjectCacheRequest(input *InvalidateProjectCacheI // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/InvalidateProjectCache func (c *CodeBuild) InvalidateProjectCache(input *InvalidateProjectCacheInput) (*InvalidateProjectCacheOutput, error) { @@ -2143,7 +2141,7 @@ func (c *CodeBuild) ListBuildBatchesForProjectRequest(input *ListBuildBatchesFor // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatchesForProject func (c *CodeBuild) ListBuildBatchesForProject(input *ListBuildBatchesForProjectInput) (*ListBuildBatchesForProjectOutput, error) { @@ -2421,7 +2419,7 @@ func (c *CodeBuild) ListBuildsForProjectRequest(input *ListBuildsForProjectInput // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildsForProject func (c *CodeBuild) ListBuildsForProject(input *ListBuildsForProjectInput) (*ListBuildsForProjectOutput, error) { @@ -2541,7 +2539,7 @@ func (c *CodeBuild) ListCuratedEnvironmentImagesRequest(input *ListCuratedEnviro // ListCuratedEnvironmentImages API operation for AWS CodeBuild. 
// -// Gets information about Docker images that are managed by CodeBuild. +// Gets information about Docker images that are managed by AWS CodeBuild. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2759,8 +2757,7 @@ func (c *CodeBuild) ListReportGroupsRequest(input *ListReportGroupsInput) (req * // ListReportGroups API operation for AWS CodeBuild. // -// Gets a list ARNs for the report groups in the current Amazon Web Services -// account. +// Gets a list ARNs for the report groups in the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2897,8 +2894,7 @@ func (c *CodeBuild) ListReportsRequest(input *ListReportsInput) (req *request.Re // ListReports API operation for AWS CodeBuild. // -// Returns a list of ARNs for the reports in the current Amazon Web Services -// account. +// Returns a list of ARNs for the reports in the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3049,7 +3045,7 @@ func (c *CodeBuild) ListReportsForReportGroupRequest(input *ListReportsForReport // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListReportsForReportGroup func (c *CodeBuild) ListReportsForReportGroup(input *ListReportsForReportGroupInput) (*ListReportsForReportGroupOutput, error) { @@ -3175,8 +3171,7 @@ func (c *CodeBuild) ListSharedProjectsRequest(input *ListSharedProjectsInput) (r // ListSharedProjects API operation for AWS CodeBuild. 
// -// Gets a list of projects that are shared with other Amazon Web Services accounts -// or users. +// Gets a list of projects that are shared with other AWS accounts or users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3313,8 +3308,7 @@ func (c *CodeBuild) ListSharedReportGroupsRequest(input *ListSharedReportGroupsI // ListSharedReportGroups API operation for AWS CodeBuild. // -// Gets a list of report groups that are shared with other Amazon Web Services -// accounts or users. +// Gets a list of report groups that are shared with other AWS accounts or users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3535,7 +3529,7 @@ func (c *CodeBuild) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req // // Returned Error Types: // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * InvalidInputException // The input value that was provided is not valid. @@ -3620,11 +3614,10 @@ func (c *CodeBuild) RetryBuildRequest(input *RetryBuildInput) (req *request.Requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuild func (c *CodeBuild) RetryBuild(input *RetryBuildInput) (*RetryBuildOutput, error) { @@ -3707,7 +3700,7 @@ func (c *CodeBuild) RetryBuildBatchRequest(input *RetryBuildBatchInput) (req *re // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuildBatch func (c *CodeBuild) RetryBuildBatch(input *RetryBuildBatchInput) (*RetryBuildBatchOutput, error) { @@ -3789,11 +3782,10 @@ func (c *CodeBuild) StartBuildRequest(input *StartBuildInput) (req *request.Requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * AccountLimitExceededException -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild func (c *CodeBuild) StartBuild(input *StartBuildInput) (*StartBuildOutput, error) { @@ -3875,7 +3867,7 @@ func (c *CodeBuild) StartBuildBatchRequest(input *StartBuildBatchInput) (req *re // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuildBatch func (c *CodeBuild) StartBuildBatch(input *StartBuildBatchInput) (*StartBuildBatchOutput, error) { @@ -3957,7 +3949,7 @@ func (c *CodeBuild) StopBuildRequest(input *StopBuildInput) (req *request.Reques // The input value that was provided is not valid. 
// // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild func (c *CodeBuild) StopBuild(input *StopBuildInput) (*StopBuildOutput, error) { @@ -4039,7 +4031,7 @@ func (c *CodeBuild) StopBuildBatchRequest(input *StopBuildBatchInput) (req *requ // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuildBatch func (c *CodeBuild) StopBuildBatch(input *StopBuildBatchInput) (*StopBuildBatchOutput, error) { @@ -4121,7 +4113,7 @@ func (c *CodeBuild) UpdateProjectRequest(input *UpdateProjectInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject func (c *CodeBuild) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { @@ -4203,7 +4195,7 @@ func (c *CodeBuild) UpdateReportGroupRequest(input *UpdateReportGroupInput) (req // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup func (c *CodeBuild) UpdateReportGroup(input *UpdateReportGroupInput) (*UpdateReportGroupOutput, error) { @@ -4271,7 +4263,7 @@ func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *reques // UpdateWebhook API operation for AWS CodeBuild. 
// -// Updates the webhook associated with an CodeBuild build project. +// Updates the webhook associated with an AWS CodeBuild build project. // // If you use Bitbucket for your repository, rotateSecret is ignored. // @@ -4287,7 +4279,7 @@ func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *reques // The input value that was provided is not valid. // // * ResourceNotFoundException -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. // // * OAuthProviderException // There was a problem with the underlying OAuth provider. @@ -4314,8 +4306,7 @@ func (c *CodeBuild) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebho return out, req.Send() } -// An Amazon Web Services service limit was exceeded for the calling Amazon -// Web Services account. +// An AWS service limit was exceeded for the calling AWS account. type AccountLimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -4591,8 +4582,8 @@ type BatchGetProjectsInput struct { _ struct{} `type:"structure"` // The names or ARNs of the build projects. To get information about a project - // shared with your Amazon Web Services account, its ARN must be specified. - // You cannot specify a shared project using its name. + // shared with your AWS account, its ARN must be specified. You cannot specify + // a shared project using its name. // // Names is a required field Names []*string `locationName:"names" min:"1" type:"list" required:"true"` @@ -4816,7 +4807,7 @@ type BatchRestrictions struct { // An array of strings that specify the compute types that are allowed for the // batch build. See Build environment compute types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) - // in the CodeBuild User Guide for these values. + // in the AWS CodeBuild User Guide for these values. 
ComputeTypesAllowed []*string `locationName:"computeTypesAllowed" type:"list"` // Specifies the maximum number of builds allowed. @@ -4890,8 +4881,8 @@ type Build struct { // Contains information about the debug session for this build. DebugSession *DebugSession `locationName:"debugSession" type:"structure"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -4908,11 +4899,11 @@ type Build struct { // A list of exported environment variables for this build. // - // Exported environment variables are used in conjunction with CodePipeline + // Exported environment variables are used in conjunction with AWS CodePipeline // to export environment variables from the current build stage to subsequent // stages in the pipeline. For more information, see Working with variables // (https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-variables.html) - // in the CodePipeline User Guide. + // in the AWS CodePipeline User Guide. ExportedEnvironmentVariables []*ExportedEnvironmentVariable `locationName:"exportedEnvironmentVariables" type:"list"` // An array of ProjectFileSystemLocation objects for a CodeBuild build project. @@ -4925,16 +4916,17 @@ type Build struct { // The entity that started the build. Valid values include: // - // * If CodePipeline started the build, the pipeline's name (for example, + // * If AWS CodePipeline started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an Identity and Access Management user started the build, the user's - // name (for example, MyUserName). 
+ // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name (for example, MyUserName). // - // * If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. Initiator *string `locationName:"initiator" type:"string"` - // Information about the build's logs in CloudWatch Logs. + // Information about the build's logs in Amazon CloudWatch Logs. Logs *LogsLocation `locationName:"logs" type:"structure"` // Describes a network interface. @@ -4944,7 +4936,7 @@ type Build struct { // about any current build phase that is not yet complete. Phases []*BuildPhase `locationName:"phases" type:"list"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. ProjectName *string `locationName:"projectName" min:"1" type:"string"` // The number of minutes a build is allowed to be queued before it times out. @@ -4955,10 +4947,10 @@ type Build struct { // An identifier for the version of this build's source code. // - // * For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit // ID. // - // * For CodePipeline, the source revision provided by CodePipeline. + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. // // * For Amazon S3, this does not apply. ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` @@ -4969,7 +4961,7 @@ type Build struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must // be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. 
@@ -5001,17 +4993,17 @@ type Build struct { // (at the build level) takes precedence. // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` // When the build process started, expressed in Unix time format. StartTime *time.Time `locationName:"startTime" type:"timestamp"` - // How long, in minutes, for CodeBuild to wait before timing out this build + // How long, in minutes, for AWS CodeBuild to wait before timing out this build // if it does not get marked as completed. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" type:"integer"` - // If your CodeBuild project accesses resources in an Amazon VPC, you provide + // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide // this parameter that identifies the VPC ID and the list of security group // IDs and subnet IDs. The security groups and subnets must belong to the same // VPC. You must provide at least one security group and one subnet ID. @@ -5227,6 +5219,38 @@ type BuildArtifacts struct { // An identifier for this artifact definition. ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` + // Specifies the access for objects that are uploaded to an Amazon S3 bucket + // that is owned by another account. + // + // By default, only the account that uploads the objects to the bucket has access + // to these objects. This property allows you to give the bucket owner access + // to these objects. + // + // NONE + // + // The bucket owner does not have access to the objects. This is the default. + // + // READ_ONLY + // + // The bucket owner has read only access to the objects. The uploading account + // retains ownership of the objects. + // + // FULL + // + // The bucket owner has full access to the objects. 
Object ownership is determined + // by the following criteria: + // + // * If the bucket is configured with the Bucket owner preferred setting, + // the bucket owner owns the objects. The uploading account will have object + // access as specified by the bucket's policy. + // + // * Otherwise, the uploading account retains ownership of the objects. + // + // For more information about Amazon S3 object ownership, see Controlling ownership + // of uploaded objects using S3 Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon Simple Storage Service User Guide. + BucketOwnerAccess *string `locationName:"bucketOwnerAccess" type:"string" enum:"BucketOwnerAccess"` + // Information that tells you if encryption for build artifacts is disabled. EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` @@ -5274,6 +5298,12 @@ func (s *BuildArtifacts) SetArtifactIdentifier(v string) *BuildArtifacts { return s } +// SetBucketOwnerAccess sets the BucketOwnerAccess field's value. +func (s *BuildArtifacts) SetBucketOwnerAccess(v string) *BuildArtifacts { + s.BucketOwnerAccess = &v + return s +} + // SetEncryptionDisabled sets the EncryptionDisabled field's value. func (s *BuildArtifacts) SetEncryptionDisabled(v bool) *BuildArtifacts { s.EncryptionDisabled = &v @@ -5348,8 +5378,8 @@ type BuildBatch struct { // Batch session debugging is not supported for matrix batch builds. DebugSessionEnabled *bool `locationName:"debugSessionEnabled" type:"boolean"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the batch build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the batch build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. 
@@ -5374,16 +5404,17 @@ type BuildBatch struct { // The entity that started the batch build. Valid values include: // - // * If CodePipeline started the build, the pipeline's name (for example, + // * If AWS CodePipeline started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an Identity and Access Management user started the build, the user's - // name. + // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name. // - // * If the Jenkins plugin for CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. Initiator *string `locationName:"initiator" type:"string"` - // Information about logs for a build project. These can be logs in CloudWatch + // Information about logs for a build project. These can be logs in Amazon CloudWatch // Logs, built in a specified S3 bucket, or both. LogConfig *LogsConfig `locationName:"logConfig" type:"structure"` @@ -5399,10 +5430,10 @@ type BuildBatch struct { // The identifier of the resolved version of this batch build's source code. // - // * For CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit // ID. // - // * For CodePipeline, the source revision provided by CodePipeline. + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. // // * For Amazon S3, this does not apply. ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` @@ -5414,7 +5445,7 @@ type BuildBatch struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must // be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. 
// // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -5447,7 +5478,7 @@ type BuildBatch struct { // The date and time that the batch build started. StartTime *time.Time `locationName:"startTime" type:"timestamp"` - // Information about the VPC configuration that CodeBuild accesses. + // Information about the VPC configuration that AWS CodeBuild accesses. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` } @@ -6013,7 +6044,7 @@ func (s *BuildPhase) SetStartTime(v time.Time) *BuildPhase { return s } -// Contains information that defines how the CodeBuild build project reports +// Contains information that defines how the AWS CodeBuild build project reports // the build status to the source provider. type BuildStatusConfig struct { _ struct{} `type:"structure"` @@ -6159,25 +6190,25 @@ func (s *BuildSummary) SetSecondaryArtifacts(v []*ResolvedArtifact) *BuildSummar return s } -// Information about CloudWatch Logs for a build project. +// Information about Amazon CloudWatch Logs for a build project. type CloudWatchLogsConfig struct { _ struct{} `type:"structure"` - // The group name of the logs in CloudWatch Logs. For more information, see - // Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). + // The group name of the logs in Amazon CloudWatch Logs. For more information, + // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). GroupName *string `locationName:"groupName" type:"string"` - // The current status of the logs in CloudWatch Logs for a build project. Valid - // values are: + // The current status of the logs in Amazon CloudWatch Logs for a build project. + // Valid values are: // - // * ENABLED: CloudWatch Logs are enabled for this build project. 
+ // * ENABLED: Amazon CloudWatch Logs are enabled for this build project. // - // * DISABLED: CloudWatch Logs are not enabled for this build project. + // * DISABLED: Amazon CloudWatch Logs are not enabled for this build project. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` - // The prefix of the stream name of the CloudWatch Logs. For more information, + // The prefix of the stream name of the Amazon CloudWatch Logs. For more information, // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). StreamName *string `locationName:"streamName" type:"string"` } @@ -6439,8 +6470,8 @@ type CreateProjectInput struct { // A description that makes the build project easy to identify. Description *string `locationName:"description" type:"string"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -6459,8 +6490,8 @@ type CreateProjectInput struct { // mountPoint, and type of a file system created using Amazon Elastic File System. FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` - // Information about logs for the build project. These can be logs in CloudWatch - // Logs, logs uploaded to a specified S3 bucket, or both. + // Information about logs for the build project. These can be logs in Amazon + // CloudWatch Logs, logs uploaded to a specified S3 bucket, or both. LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` // The name of the build project. 
@@ -6482,9 +6513,9 @@ type CreateProjectInput struct { // An array of ProjectSource objects. SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` - // The ARN of the Identity and Access Management role that enables CodeBuild - // to interact with dependent Amazon Web Services services on behalf of the - // Amazon Web Services account. + // The ARN of the AWS Identity and Access Management (IAM) role that enables + // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS + // account. // // ServiceRole is a required field ServiceRole *string `locationName:"serviceRole" min:"1" type:"string" required:"true"` @@ -6497,7 +6528,7 @@ type CreateProjectInput struct { // A version of the build input to be built for this project. If not specified, // the latest version is used. If specified, it must be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -6518,21 +6549,21 @@ type CreateProjectInput struct { // precedence over this sourceVersion (at the project level). // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // A list of tag key and value pairs associated with this build project. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild build project tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. 
Tags []*Tag `locationName:"tags" type:"list"` - // How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before - // it times out any build that has not been marked as completed. The default - // is 60 minutes. + // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait + // before it times out any build that has not been marked as completed. The + // default is 60 minutes. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // VpcConfig enables CodeBuild to access resources in an Amazon VPC. + // VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` } @@ -6826,8 +6857,8 @@ type CreateReportGroupInput struct { // A list of tag key and value pairs associated with this report group. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild report group tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. Tags []*Tag `locationName:"tags" type:"list"` // The type of report group. @@ -6952,7 +6983,7 @@ type CreateWebhookInput struct { // array must pass. For a filter group to pass, each of its filters must pass. FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` @@ -7012,7 +7043,7 @@ type CreateWebhookOutput struct { _ struct{} `type:"structure"` // Information about a webhook that connects repository events to a build project - // in CodeBuild. + // in AWS CodeBuild. 
Webhook *Webhook `locationName:"webhook" type:"structure"` } @@ -7455,7 +7486,7 @@ func (s *DeleteSourceCredentialsOutput) SetArn(v string) *DeleteSourceCredential type DeleteWebhookInput struct { _ struct{} `type:"structure"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` @@ -7762,7 +7793,7 @@ func (s *DescribeTestCasesOutput) SetTestCases(v []*TestCase) *DescribeTestCases return s } -// Information about a Docker image that is managed by CodeBuild. +// Information about a Docker image that is managed by AWS CodeBuild. type EnvironmentImage struct { _ struct{} `type:"structure"` @@ -7805,7 +7836,7 @@ func (s *EnvironmentImage) SetVersions(v []*string) *EnvironmentImage { } // A set of Docker images that are related by programming language and are managed -// by CodeBuild. +// by AWS CodeBuild. type EnvironmentLanguage struct { _ struct{} `type:"structure"` @@ -7838,7 +7869,8 @@ func (s *EnvironmentLanguage) SetLanguage(v string) *EnvironmentLanguage { return s } -// A set of Docker images that are related by platform and are managed by CodeBuild. +// A set of Docker images that are related by platform and are managed by AWS +// CodeBuild. type EnvironmentPlatform struct { _ struct{} `type:"structure"` @@ -7882,28 +7914,27 @@ type EnvironmentVariable struct { // The type of environment variable. Valid values include: // - // * PARAMETER_STORE: An environment variable stored in Systems Manager Parameter - // Store. To learn how to specify a parameter store environment variable, - // see env/parameter-store (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) - // in the CodeBuild User Guide. + // * PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems + // Manager Parameter Store. 
To learn how to specify a parameter store environment + // variable, see env/parameter-store (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) + // in the AWS CodeBuild User Guide. // // * PLAINTEXT: An environment variable in plain text format. This is the // default value. // - // * SECRETS_MANAGER: An environment variable stored in Secrets Manager. + // * SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. // To learn how to specify a secrets manager environment variable, see env/secrets-manager // (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.secrets-manager) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. Type *string `locationName:"type" type:"string" enum:"EnvironmentVariableType"` // The value of the environment variable. // // We strongly discourage the use of PLAINTEXT environment variables to store - // sensitive values, especially Amazon Web Services secret key IDs and secret - // access keys. PLAINTEXT environment variables can be displayed in plain text - // using the CodeBuild console and the AWS Command Line Interface (AWS CLI). - // For sensitive values, we recommend you use an environment variable of type - // PARAMETER_STORE or SECRETS_MANAGER. + // sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT + // environment variables can be displayed in plain text using the AWS CodeBuild + // console and the AWS Command Line Interface (AWS CLI). For sensitive values, + // we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER. // // Value is a required field Value *string `locationName:"value" type:"string" required:"true"` @@ -7958,11 +7989,11 @@ func (s *EnvironmentVariable) SetValue(v string) *EnvironmentVariable { // Contains information about an exported environment variable. 
// -// Exported environment variables are used in conjunction with CodePipeline +// Exported environment variables are used in conjunction with AWS CodePipeline // to export environment variables from the current build stage to subsequent // stages in the pipeline. For more information, see Working with variables // (https://docs.aws.amazon.com/codepipeline/latest/userguide/actions-variables.html) -// in the CodePipeline User Guide. +// in the AWS CodePipeline User Guide. // // During a build, the value of a variable is available starting with the install // phase. It can be updated between the start of the install phase and the end @@ -8206,12 +8237,12 @@ func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { return s } -// Information about the Git submodules configuration for an CodeBuild build +// Information about the Git submodules configuration for an AWS CodeBuild build // project. type GitSubmodulesConfig struct { _ struct{} `type:"structure"` - // Set to true to fetch Git submodules for your CodeBuild build project. + // Set to true to fetch Git submodules for your AWS CodeBuild build project. // // FetchSubmodules is a required field FetchSubmodules *bool `locationName:"fetchSubmodules" type:"boolean" required:"true"` @@ -8251,7 +8282,7 @@ type ImportSourceCredentialsInput struct { // The type of authentication used to connect to a GitHub, GitHub Enterprise, // or Bitbucket repository. An OAUTH connection is not supported by the API - // and must be created using the CodeBuild console. + // and must be created using the AWS CodeBuild console. // // AuthType is a required field AuthType *string `locationName:"authType" type:"string" required:"true" enum:"AuthType"` @@ -8424,7 +8455,7 @@ func (s *InvalidInputException) RequestID() string { type InvalidateProjectCacheInput struct { _ struct{} `type:"structure"` - // The name of the CodeBuild build project that the cache is reset for. 
+ // The name of the AWS CodeBuild build project that the cache is reset for. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` @@ -8706,7 +8737,7 @@ type ListBuildsForProjectInput struct { // more next tokens are returned. NextToken *string `locationName:"nextToken" type:"string"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` @@ -8899,7 +8930,7 @@ type ListCuratedEnvironmentImagesOutput struct { _ struct{} `type:"structure"` // Information about supported platforms for Docker images that are managed - // by CodeBuild. + // by AWS CodeBuild. Platforms []*EnvironmentPlatform `locationName:"platforms" type:"list"` } @@ -9120,8 +9151,7 @@ type ListReportGroupsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the report groups in the current Amazon Web Services - // account. + // The list of ARNs for the report groups in the current AWS account. ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } @@ -9360,8 +9390,7 @@ type ListReportsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of returned ARNs for the reports in the current Amazon Web Services - // account. + // The list of returned ARNs for the reports in the current AWS account. Reports []*string `locationName:"reports" min:"1" type:"list"` } @@ -9404,8 +9433,8 @@ type ListSharedProjectsInput struct { // returned. NextToken *string `locationName:"nextToken" min:"1" type:"string"` - // The criterion to be used to list build projects shared with the current Amazon - // Web Services account or user. Valid values include: + // The criterion to be used to list build projects shared with the current AWS + // account or user. 
Valid values include: // // * ARN: List based on the ARN. // @@ -9483,8 +9512,8 @@ type ListSharedProjectsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the build projects shared with the current Amazon Web - // Services account or user. + // The list of ARNs for the build projects shared with the current AWS account + // or user. Projects []*string `locationName:"projects" min:"1" type:"list"` } @@ -9527,8 +9556,8 @@ type ListSharedReportGroupsInput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The criterion to be used to list report groups shared with the current Amazon - // Web Services account or user. Valid values include: + // The criterion to be used to list report groups shared with the current AWS + // account or user. Valid values include: // // * ARN: List based on the ARN. // @@ -9603,8 +9632,8 @@ type ListSharedReportGroupsOutput struct { // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The list of ARNs for the report groups shared with the current Amazon Web - // Services account or user. + // The list of ARNs for the report groups shared with the current AWS account + // or user. ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } @@ -9669,13 +9698,13 @@ func (s *ListSourceCredentialsOutput) SetSourceCredentialsInfos(v []*SourceCrede return s } -// Information about logs for a build project. These can be logs in CloudWatch +// Information about logs for a build project. These can be logs in Amazon CloudWatch // Logs, built in a specified S3 bucket, or both. type LogsConfig struct { _ struct{} `type:"structure"` - // Information about CloudWatch Logs for a build project. CloudWatch Logs are - // enabled by default. + // Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch + // Logs are enabled by default. 
CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` // Information about logs built to an S3 bucket for a build project. S3 logs @@ -9725,21 +9754,21 @@ func (s *LogsConfig) SetS3Logs(v *S3LogsConfig) *LogsConfig { return s } -// Information about build logs in CloudWatch Logs. +// Information about build logs in Amazon CloudWatch Logs. type LogsLocation struct { _ struct{} `type:"structure"` - // Information about CloudWatch Logs for a build project. + // Information about Amazon CloudWatch Logs for a build project. CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` - // The ARN of CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. - // For more information, see Resources Defined by CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies). + // The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. + // For more information, see Resources Defined by Amazon CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies). CloudWatchLogsArn *string `locationName:"cloudWatchLogsArn" type:"string"` - // The URL to an individual build log in CloudWatch Logs. + // The URL to an individual build log in Amazon CloudWatch Logs. DeepLink *string `locationName:"deepLink" type:"string"` - // The name of the CloudWatch Logs group for the build logs. + // The name of the Amazon CloudWatch Logs group for the build logs. GroupName *string `locationName:"groupName" type:"string"` // The URL to a build log in an S3 bucket. 
@@ -9752,7 +9781,7 @@ type LogsLocation struct { // For more information, see Resources Defined by Amazon S3 (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html#amazons3-resources-for-iam-policies). S3LogsArn *string `locationName:"s3LogsArn" type:"string"` - // The name of the CloudWatch Logs stream for the build logs. + // The name of the Amazon CloudWatch Logs stream for the build logs. StreamName *string `locationName:"streamName" type:"string"` } @@ -9971,16 +10000,14 @@ type Project struct { // A description that makes the build project easy to identify. Description *string `locationName:"description" type:"string"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/). If you don't specify - // a value, CodeBuild uses the managed CMK for Amazon Simple Storage Service - // (Amazon S3). + // the CMK's alias (using the format alias/). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // Information about the build environment for this build project. @@ -9996,7 +10023,7 @@ type Project struct { LastModified *time.Time `locationName:"lastModified" type:"timestamp"` // Information about logs for the build project. A project can create logs in - // CloudWatch Logs, an S3 bucket, or both. + // Amazon CloudWatch Logs, an S3 bucket, or both. LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` // The name of the build project. @@ -10016,9 +10043,9 @@ type Project struct { // An array of ProjectSource objects. 
SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` - // The ARN of the Identity and Access Management role that enables CodeBuild - // to interact with dependent Amazon Web Services services on behalf of the - // Amazon Web Services account. + // The ARN of the AWS Identity and Access Management (IAM) role that enables + // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS + // account. ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` // Information about the build input source code for this build project. @@ -10027,7 +10054,7 @@ type Project struct { // A version of the build input to be built for this project. If not specified, // the latest version is used. If specified, it must be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -10048,25 +10075,25 @@ type Project struct { // precedence over this sourceVersion (at the project level). // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // A list of tag key and value pairs associated with this build project. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild build project tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. Tags []*Tag `locationName:"tags" type:"list"` - // How long, in minutes, from 5 to 480 (8 hours), for CodeBuild to wait before - // timing out any related build that did not get marked as completed. The default - // is 60 minutes. 
+ // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait + // before timing out any related build that did not get marked as completed. + // The default is 60 minutes. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // Information about the VPC configuration that CodeBuild accesses. + // Information about the VPC configuration that AWS CodeBuild accesses. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` // Information about a webhook that connects repository events to a build project - // in CodeBuild. + // in AWS CodeBuild. Webhook *Webhook `locationName:"webhook" type:"structure"` } @@ -10237,6 +10264,38 @@ type ProjectArtifacts struct { // An identifier for this artifact definition. ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` + // Specifies the access for objects that are uploaded to an Amazon S3 bucket + // that is owned by another account. + // + // By default, only the account that uploads the objects to the bucket has access + // to these objects. This property allows you to give the bucket owner access + // to these objects. + // + // NONE + // + // The bucket owner does not have access to the objects. This is the default. + // + // READ_ONLY + // + // The bucket owner has read only access to the objects. The uploading account + // retains ownership of the objects. + // + // FULL + // + // The bucket owner has full access to the objects. Object ownership is determined + // by the following criteria: + // + // * If the bucket is configured with the Bucket owner preferred setting, + // the bucket owner owns the objects. The uploading account will have object + // access as specified by the bucket's policy. + // + // * Otherwise, the uploading account retains ownership of the objects. 
+ // + // For more information about Amazon S3 object ownership, see Controlling ownership + // of uploaded objects using S3 Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon Simple Storage Service User Guide. + BucketOwnerAccess *string `locationName:"bucketOwnerAccess" type:"string" enum:"BucketOwnerAccess"` + // Set to true if you do not want your output artifacts encrypted. This option // is valid only if your artifacts type is Amazon S3. If this is set with another // artifacts type, an invalidInputException is thrown. @@ -10244,9 +10303,9 @@ type ProjectArtifacts struct { // Information about the build output artifact location: // - // * If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. - // This is because CodePipeline manages its build output locations instead - // of CodeBuild. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // locations instead of AWS CodeBuild. // // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. @@ -10254,12 +10313,12 @@ type ProjectArtifacts struct { // * If type is set to S3, this is the name of the output bucket. Location *string `locationName:"location" type:"string"` - // Along with path and namespaceType, the pattern that CodeBuild uses to name - // and store the output artifact: + // Along with path and namespaceType, the pattern that AWS CodeBuild uses to + // name and store the output artifact: // - // * If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. - // This is because CodePipeline manages its build output names instead of - // CodeBuild. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // names instead of AWS CodeBuild. 
// // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. @@ -10280,12 +10339,12 @@ type ProjectArtifacts struct { // name is set to "/", the output artifact is stored in MyArtifacts/. Name *string `locationName:"name" type:"string"` - // Along with path and name, the pattern that CodeBuild uses to determine the - // name and location to store the output artifact: + // Along with path and name, the pattern that AWS CodeBuild uses to determine + // the name and location to store the output artifact: // - // * If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. - // This is because CodePipeline manages its build output names instead of - // CodeBuild. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // names instead of AWS CodeBuild. // // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. @@ -10306,25 +10365,25 @@ type ProjectArtifacts struct { // The type of build output artifact to create: // - // * If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. - // This is because CodePipeline manages its build output artifacts instead - // of CodeBuild. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // artifacts instead of AWS CodeBuild. // // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. // - // * If type is set to S3, valid values include: NONE: CodeBuild creates + // * If type is set to S3, valid values include: NONE: AWS CodeBuild creates // in the output bucket a folder that contains the build output. This is - // the default if packaging is not specified. 
ZIP: CodeBuild creates in the - // output bucket a ZIP file that contains the build output. + // the default if packaging is not specified. ZIP: AWS CodeBuild creates + // in the output bucket a ZIP file that contains the build output. Packaging *string `locationName:"packaging" type:"string" enum:"ArtifactPackaging"` - // Along with namespaceType and name, the pattern that CodeBuild uses to name - // and store the output artifact: + // Along with namespaceType and name, the pattern that AWS CodeBuild uses to + // name and store the output artifact: // - // * If type is set to CODEPIPELINE, CodePipeline ignores this value if specified. - // This is because CodePipeline manages its build output names instead of - // CodeBuild. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // names instead of AWS CodeBuild. // // * If type is set to NO_ARTIFACTS, this value is ignored if specified, // because no build output is produced. @@ -10339,8 +10398,8 @@ type ProjectArtifacts struct { // The type of build output artifact. Valid values include: // - // * CODEPIPELINE: The build project has build output generated through CodePipeline. - // The CODEPIPELINE type is not supported for secondaryArtifacts. + // * CODEPIPELINE: The build project has build output generated through AWS + // CodePipeline. The CODEPIPELINE type is not supported for secondaryArtifacts. // // * NO_ARTIFACTS: The build project does not produce any build output. // @@ -10379,6 +10438,12 @@ func (s *ProjectArtifacts) SetArtifactIdentifier(v string) *ProjectArtifacts { return s } +// SetBucketOwnerAccess sets the BucketOwnerAccess field's value. +func (s *ProjectArtifacts) SetBucketOwnerAccess(v string) *ProjectArtifacts { + s.BucketOwnerAccess = &v + return s +} + // SetEncryptionDisabled sets the EncryptionDisabled field's value. 
func (s *ProjectArtifacts) SetEncryptionDisabled(v bool) *ProjectArtifacts { s.EncryptionDisabled = &v @@ -10647,7 +10712,7 @@ type ProjectEnvironment struct { // The ARN of the Amazon S3 bucket, path prefix, and object key that contains // the PEM-encoded certificate for the build project. For more information, // see certificate (https://docs.aws.amazon.com/codebuild/latest/userguide/create-project-cli.html#cli.environment.certificate) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. Certificate *string `locationName:"certificate" type:"string"` // Information about the compute resources the build project uses. Available @@ -10676,7 +10741,7 @@ type ProjectEnvironment struct { // 8 vCPUs on ARM-based processors for builds. // // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. // // ComputeType is a required field ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"` @@ -10696,24 +10761,21 @@ type ProjectEnvironment struct { // to specify an image with the digest "sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf," // use /@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf. // - // For more information, see Docker images provided by CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-available.html) - // in the CodeBuild user guide. - // // Image is a required field Image *string `locationName:"image" min:"1" type:"string" required:"true"` - // The type of credentials CodeBuild uses to pull images in your build. There - // are two valid values: + // The type of credentials AWS CodeBuild uses to pull images in your build. + // There are two valid values: // - // * CODEBUILD specifies that CodeBuild uses its own credentials. 
This requires - // that you modify your ECR repository policy to trust CodeBuild service - // principal. + // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This + // requires that you modify your ECR repository policy to trust AWS CodeBuild's + // service principal. // - // * SERVICE_ROLE specifies that CodeBuild uses your build project's service - // role. + // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's + // service role. // // When you use a cross-account or private registry image, you must use SERVICE_ROLE - // credentials. When you use an CodeBuild curated image, you must use CODEBUILD + // credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD // credentials. ImagePullCredentialsType *string `locationName:"imagePullCredentialsType" type:"string" enum:"ImagePullCredentialsType"` @@ -10763,13 +10825,6 @@ type ProjectEnvironment struct { // Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China // (Beijing), and China (Ningxia). // - // * The environment types WINDOWS_CONTAINER and WINDOWS_SERVER_2019_CONTAINER - // are available only in regions US East (N. Virginia), US East (Ohio), US - // West (Oregon), and EU (Ireland). - // - // For more information, see Build environment compute types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) - // in the CodeBuild user guide. - // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"EnvironmentType"` } @@ -10884,9 +10939,9 @@ type ProjectFileSystemLocation struct { // A string that specifies the location of the file system created by Amazon // EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name - // of file system when you view it in the Amazon EFS console. The directory - // path is a path to a directory in the file system that CodeBuild mounts. 
For - // example, if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, + // of file system when you view it in the AWS EFS console. The directory path + // is a path to a directory in the file system that CodeBuild mounts. For example, + // if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, // and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory. // // The directory path in the format efs-dns-name:/directory-path is optional. @@ -10894,7 +10949,7 @@ type ProjectFileSystemLocation struct { // and CodeBuild mounts the entire file system. Location *string `locationName:"location" type:"string"` - // The mount options for a file system created by Amazon EFS. The default mount + // The mount options for a file system created by AWS EFS. The default mount // options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. // For more information, see Recommended NFS Mount Options (https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-nfs-mount-settings.html). MountOptions *string `locationName:"mountOptions" type:"string"` @@ -10950,10 +11005,10 @@ func (s *ProjectFileSystemLocation) SetType(v string) *ProjectFileSystemLocation type ProjectSource struct { _ struct{} `type:"structure"` - // Information about the authorization settings for CodeBuild to access the - // source code to be built. + // Information about the authorization settings for AWS CodeBuild to access + // the source code to be built. // - // This information is for the CodeBuild console's use only. Your code should + // This information is for the AWS CodeBuild console's use only. Your code should // not get or set this information directly. 
Auth *SourceAuth `locationName:"auth" type:"structure"` @@ -10967,7 +11022,7 @@ type ProjectSource struct { // If this value is set, it can be either an inline buildspec definition, the // path to an alternate buildspec file relative to the value of the built-in // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The - // bucket must be in the same Region as the build project. Specify the buildspec + // bucket must be in the same AWS Region as the build project. Specify the buildspec // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). // If this value is not provided or is set to an empty string, the source code // must contain a buildspec file in its root directory. For more information, @@ -10988,13 +11043,13 @@ type ProjectSource struct { // include: // // * For source code settings that are specified in the source action of - // a pipeline in CodePipeline, location should not be specified. If it is - // specified, CodePipeline ignores it. This is because CodePipeline uses - // the settings in a pipeline's source action instead of this value. + // a pipeline in AWS CodePipeline, location should not be specified. If it + // is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline + // uses the settings in a pipeline's source action instead of this value. // - // * For source code in an CodeCommit repository, the HTTPS clone URL to - // the repository that contains the source code and the buildspec file (for - // example, https://git-codecommit..amazonaws.com/v1/repos/). + // * For source code in an AWS CodeCommit repository, the HTTPS clone URL + // to the repository that contains the source code and the buildspec file + // (for example, https://git-codecommit..amazonaws.com/v1/repos/). // // * For source code in an Amazon S3 input bucket, one of the following. // The path to the ZIP file that contains the source code (for example, //.zip). 
@@ -11002,29 +11057,26 @@ type ProjectSource struct { // // * For source code in a GitHub repository, the HTTPS clone URL to the repository // that contains the source and the buildspec file. You must connect your - // account to your GitHub account. Use the CodeBuild console to start creating - // a build project. When you use the console to connect (or reconnect) with - // GitHub, on the GitHub Authorize application page, for Organization access, - // choose Request access next to each repository you want to allow CodeBuild - // to have access to, and then choose Authorize application. (After you have - // connected to your GitHub account, you do not need to finish creating the - // build project. You can leave the CodeBuild console.) To instruct CodeBuild - // to use this connection, in the source object, set the auth object's type - // value to OAUTH. + // AWS account to your GitHub account. Use the AWS CodeBuild console to start + // creating a build project. When you use the console to connect (or reconnect) + // with GitHub, on the GitHub Authorize application page, for Organization + // access, choose Request access next to each repository you want to allow + // AWS CodeBuild to have access to, and then choose Authorize application. + // (After you have connected to your GitHub account, you do not need to finish + // creating the build project. You can leave the AWS CodeBuild console.) + // To instruct AWS CodeBuild to use this connection, in the source object, + // set the auth object's type value to OAUTH. // // * For source code in a Bitbucket repository, the HTTPS clone URL to the // repository that contains the source and the buildspec file. You must connect - // your Amazon Web Services account to your Bitbucket account. Use the CodeBuild - // console to start creating a build project. When you use the console to - // connect (or reconnect) with Bitbucket, on the Bitbucket Confirm access - // to your account page, choose Grant access. 
(After you have connected to - // your Bitbucket account, you do not need to finish creating the build project. - // You can leave the CodeBuild console.) To instruct CodeBuild to use this + // your AWS account to your Bitbucket account. Use the AWS CodeBuild console + // to start creating a build project. When you use the console to connect + // (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your + // account page, choose Grant access. (After you have connected to your Bitbucket + // account, you do not need to finish creating the build project. You can + // leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this // connection, in the source object, set the auth object's type value to // OAUTH. - // - // If you specify CODEPIPELINE for the Type property, don't specify this property. - // For all of the other types, you must specify Location. Location *string `locationName:"location" type:"string"` // Set to true to report the status of a build's start and finish to your source @@ -11036,7 +11088,7 @@ type ProjectSource struct { // with the source provider must have write access to the repo. If the user // does not have write access, the build status cannot be updated. For more // information, see Source provider access (https://docs.aws.amazon.com/codebuild/latest/userguide/access-tokens.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. // // The status of a build triggered by a webhook is always reported to your source // provider. @@ -11051,10 +11103,10 @@ type ProjectSource struct { // // * BITBUCKET: The source code is in a Bitbucket repository. // - // * CODECOMMIT: The source code is in an CodeCommit repository. + // * CODECOMMIT: The source code is in an AWS CodeCommit repository. // // * CODEPIPELINE: The source code settings are specified in the source action - // of a pipeline in CodePipeline. + // of a pipeline in AWS CodePipeline. 
// // * GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository. // @@ -11176,7 +11228,7 @@ type ProjectSourceVersion struct { // The source version for the corresponding source identifier. If specified, // must be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -11194,7 +11246,7 @@ type ProjectSourceVersion struct { // input ZIP file to use. // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. // // SourceVersion is a required field SourceVersion *string `locationName:"sourceVersion" type:"string" required:"true"` @@ -11244,7 +11296,7 @@ type PutResourcePolicyInput struct { // A JSON-formatted resource policy. For more information, see Sharing a Project // (https://docs.aws.amazon.com/codebuild/latest/userguide/project-sharing.html#project-sharing-share) // and Sharing a Report Group (https://docs.aws.amazon.com/codebuild/latest/userguide/report-groups-sharing.html#report-groups-sharing-share) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. // // Policy is a required field Policy *string `locationName:"policy" min:"1" type:"string" required:"true"` @@ -11331,22 +11383,22 @@ func (s *PutResourcePolicyOutput) SetResourceArn(v string) *PutResourcePolicyOut // // * images cannot be curated or an Amazon ECR image. // -// For more information, see Private Registry with Secrets Manager Sample for -// CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-private-registry.html). 
+// For more information, see Private Registry with AWS Secrets Manager Sample +// for AWS CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-private-registry.html). type RegistryCredential struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) or name of credentials created using Secrets + // The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets // Manager. // // The credential can use the name of the credentials only if they exist in - // your current Region. + // your current AWS Region. // // Credential is a required field Credential *string `locationName:"credential" min:"1" type:"string" required:"true"` // The service that created the credentials to access a private Docker registry. - // The valid value, SECRETS_MANAGER, is for Secrets Manager. + // The valid value, SECRETS_MANAGER, is for AWS Secrets Manager. // // CredentialProvider is a required field CredentialProvider *string `locationName:"credentialProvider" type:"string" required:"true" enum:"CredentialProviderType"` @@ -11647,8 +11699,8 @@ type ReportGroup struct { // A list of tag key and value pairs associated with this report group. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild report group tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. Tags []*Tag `locationName:"tags" type:"list"` // The type of the ReportGroup. This can be one of the following values: @@ -11840,8 +11892,8 @@ func (s *ResolvedArtifact) SetType(v string) *ResolvedArtifact { return s } -// The specified Amazon Web Services resource cannot be created, because an -// Amazon Web Services resource with the same settings already exists. +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. 
type ResourceAlreadyExistsException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -11897,7 +11949,7 @@ func (s *ResourceAlreadyExistsException) RequestID() string { return s.RespMetadata.RequestID } -// The specified Amazon Web Services resource cannot be found. +// The specified AWS resource cannot be found. type ResourceNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -11962,8 +12014,8 @@ type RetryBuildBatchInput struct { // A unique, case sensitive identifier you provide to ensure the idempotency // of the RetryBuildBatch request. The token is included in the RetryBuildBatch // request and is valid for five minutes. If you repeat the RetryBuildBatch - // request with the same token, but change a parameter, CodeBuild returns a - // parameter mismatch error. + // request with the same token, but change a parameter, AWS CodeBuild returns + // a parameter mismatch error. IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` // Specifies the type of retry to perform. @@ -12043,8 +12095,8 @@ type RetryBuildInput struct { // A unique, case sensitive identifier you provide to ensure the idempotency // of the RetryBuild request. The token is included in the RetryBuild request // and is valid for five minutes. If you repeat the RetryBuild request with - // the same token, but change a parameter, CodeBuild returns a parameter mismatch - // error. + // the same token, but change a parameter, AWS CodeBuild returns a parameter + // mismatch error. IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` } @@ -12110,6 +12162,38 @@ func (s *RetryBuildOutput) SetBuild(v *Build) *RetryBuildOutput { type S3LogsConfig struct { _ struct{} `type:"structure"` + // Specifies the access for objects that are uploaded to an Amazon S3 bucket + // that is owned by another account. 
+ // + // By default, only the account that uploads the objects to the bucket has access + // to these objects. This property allows you to give the bucket owner access + // to these objects. + // + // NONE + // + // The bucket owner does not have access to the objects. This is the default. + // + // READ_ONLY + // + // The bucket owner has read only access to the objects. The uploading account + // retains ownership of the objects. + // + // FULL + // + // The bucket owner has full access to the objects. Object ownership is determined + // by the following criteria: + // + // * If the bucket is configured with the Bucket owner preferred setting, + // the bucket owner owns the objects. The uploading account will have object + // access as specified by the bucket's policy. + // + // * Otherwise, the uploading account retains ownership of the objects. + // + // For more information about Amazon S3 object ownership, see Controlling ownership + // of uploaded objects using S3 Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon Simple Storage Service User Guide. + BucketOwnerAccess *string `locationName:"bucketOwnerAccess" type:"string" enum:"BucketOwnerAccess"` + // Set to true if you do not want your S3 build log output encrypted. By default // S3 build logs are encrypted. EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` @@ -12152,6 +12236,12 @@ func (s *S3LogsConfig) Validate() error { return nil } +// SetBucketOwnerAccess sets the BucketOwnerAccess field's value. +func (s *S3LogsConfig) SetBucketOwnerAccess(v string) *S3LogsConfig { + s.BucketOwnerAccess = &v + return s +} + // SetEncryptionDisabled sets the EncryptionDisabled field's value. func (s *S3LogsConfig) SetEncryptionDisabled(v bool) *S3LogsConfig { s.EncryptionDisabled = &v @@ -12177,9 +12267,9 @@ type S3ReportExportConfig struct { // The name of the S3 bucket where the raw data of a report are exported. 
Bucket *string `locationName:"bucket" min:"1" type:"string"` - // The Amazon Web Services account identifier of the owner of the Amazon S3 - // bucket. This allows report data to be exported to an Amazon S3 bucket that - // is owned by an account other than the account running the build. + // The AWS account identifier of the owner of the Amazon S3 bucket. This allows + // report data to be exported to an Amazon S3 bucket that is owned by an account + // other than the account running the build. BucketOwner *string `locationName:"bucketOwner" type:"string"` // A boolean value that specifies if the results of a report are encrypted. @@ -12190,10 +12280,11 @@ type S3ReportExportConfig struct { // The type of build output artifact to create. Valid values include: // - // * NONE: CodeBuild creates the raw data in the output bucket. This is the - // default if packaging is not specified. + // * NONE: AWS CodeBuild creates the raw data in the output bucket. This + // is the default if packaging is not specified. // - // * ZIP: CodeBuild creates a ZIP file with the raw data in the output bucket. + // * ZIP: AWS CodeBuild creates a ZIP file with the raw data in the output + // bucket. Packaging *string `locationName:"packaging" type:"string" enum:"ReportPackagingType"` // The path to the exported report's raw data results. @@ -12262,10 +12353,10 @@ func (s *S3ReportExportConfig) SetPath(v string) *S3ReportExportConfig { return s } -// Information about the authorization settings for CodeBuild to access the -// source code to be built. +// Information about the authorization settings for AWS CodeBuild to access +// the source code to be built. // -// This information is for the CodeBuild console's use only. Your code should +// This information is for the AWS CodeBuild console's use only. Your code should // not get or set this information directly. 
type SourceAuth struct { _ struct{} `type:"structure"` @@ -12383,7 +12474,7 @@ type StartBuildBatchInput struct { // If this value is set, it can be either an inline buildspec definition, the // path to an alternate buildspec file relative to the value of the built-in // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The - // bucket must be in the same Region as the build project. Specify the buildspec + // bucket must be in the same AWS Region as the build project. Specify the buildspec // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). // If this value is not provided or is set to an empty string, the source code // must contain a buildspec file in its root directory. For more information, @@ -12406,9 +12497,9 @@ type StartBuildBatchInput struct { // Batch session debugging is not supported for matrix batch builds. DebugSessionEnabled *bool `locationName:"debugSessionEnabled" type:"boolean"` - // The Key Management Service customer master key (CMK) that overrides the one - // specified in the batch build project. The CMK key encrypts the build output - // artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides + // the one specified in the batch build project. The CMK key encrypts the build + // output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -12437,28 +12528,28 @@ type StartBuildBatchInput struct { // A unique, case sensitive identifier you provide to ensure the idempotency // of the StartBuildBatch request. The token is included in the StartBuildBatch // request and is valid for five minutes. If you repeat the StartBuildBatch - // request with the same token, but change a parameter, CodeBuild returns a - // parameter mismatch error. + // request with the same token, but change a parameter, AWS CodeBuild returns + // a parameter mismatch error. 
IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` // The name of an image for this batch build that overrides the one specified // in the batch build project. ImageOverride *string `locationName:"imageOverride" min:"1" type:"string"` - // The type of credentials CodeBuild uses to pull images in your batch build. + // The type of credentials AWS CodeBuild uses to pull images in your batch build. // There are two valid values: // // CODEBUILD // - // Specifies that CodeBuild uses its own credentials. This requires that you - // modify your ECR repository policy to trust CodeBuild's service principal. + // Specifies that AWS CodeBuild uses its own credentials. This requires that + // you modify your ECR repository policy to trust AWS CodeBuild's service principal. // // SERVICE_ROLE // - // Specifies that CodeBuild uses your build project's service role. + // Specifies that AWS CodeBuild uses your build project's service role. // // When using a cross-account or private registry image, you must use SERVICE_ROLE - // credentials. When using an CodeBuild curated image, you must use CODEBUILD + // credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD // credentials. ImagePullCredentialsTypeOverride *string `locationName:"imagePullCredentialsTypeOverride" type:"string" enum:"ImagePullCredentialsType"` @@ -12530,7 +12621,7 @@ type StartBuildBatchInput struct { // not specified, the latest version is used. If specified, the contents depends // on the source provider: // - // CodeCommit + // AWS CodeCommit // // The commit ID, branch, or Git tag to use. // @@ -12558,7 +12649,7 @@ type StartBuildBatchInput struct { // (at the build level) takes precedence. // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. 
SourceVersion *string `locationName:"sourceVersion" type:"string"` } @@ -12905,7 +12996,7 @@ type StartBuildInput struct { // If this value is set, it can be either an inline buildspec definition, the // path to an alternate buildspec file relative to the value of the built-in // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The - // bucket must be in the same Region as the build project. Specify the buildspec + // bucket must be in the same AWS Region as the build project. Specify the buildspec // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). // If this value is not provided or is set to an empty string, the source code // must contain a buildspec file in its root directory. For more information, @@ -12928,8 +13019,9 @@ type StartBuildInput struct { // see Viewing a running build in Session Manager (https://docs.aws.amazon.com/codebuild/latest/userguide/session-manager.html). DebugSessionEnabled *bool `locationName:"debugSessionEnabled" type:"boolean"` - // The Key Management Service customer master key (CMK) that overrides the one - // specified in the build project. The CMK key encrypts the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides + // the one specified in the build project. The CMK key encrypts the build output + // artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -12950,14 +13042,14 @@ type StartBuildInput struct { // for this build only, any previous depth of history defined in the build project. GitCloneDepthOverride *int64 `locationName:"gitCloneDepthOverride" type:"integer"` - // Information about the Git submodules configuration for this build of an CodeBuild - // build project. + // Information about the Git submodules configuration for this build of an AWS + // CodeBuild build project. 
GitSubmodulesConfigOverride *GitSubmodulesConfig `locationName:"gitSubmodulesConfigOverride" type:"structure"` // A unique, case sensitive identifier you provide to ensure the idempotency // of the StartBuild request. The token is included in the StartBuild request // and is valid for 5 minutes. If you repeat the StartBuild request with the - // same token, but change a parameter, CodeBuild returns a parameter mismatch + // same token, but change a parameter, AWS CodeBuild returns a parameter mismatch // error. IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` @@ -12965,20 +13057,20 @@ type StartBuildInput struct { // build project. ImageOverride *string `locationName:"imageOverride" min:"1" type:"string"` - // The type of credentials CodeBuild uses to pull images in your build. There - // are two valid values: + // The type of credentials AWS CodeBuild uses to pull images in your build. + // There are two valid values: // // CODEBUILD // - // Specifies that CodeBuild uses its own credentials. This requires that you - // modify your ECR repository policy to trust CodeBuild's service principal. + // Specifies that AWS CodeBuild uses its own credentials. This requires that + // you modify your ECR repository policy to trust AWS CodeBuild's service principal. // // SERVICE_ROLE // - // Specifies that CodeBuild uses your build project's service role. + // Specifies that AWS CodeBuild uses your build project's service role. // // When using a cross-account or private registry image, you must use SERVICE_ROLE - // credentials. When using an CodeBuild curated image, you must use CODEBUILD + // credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD // credentials. ImagePullCredentialsTypeOverride *string `locationName:"imagePullCredentialsTypeOverride" type:"string" enum:"ImagePullCredentialsType"` @@ -12995,7 +13087,7 @@ type StartBuildInput struct { // Enable this flag to override privileged mode in the build project. 
PrivilegedModeOverride *bool `locationName:"privilegedModeOverride" type:"boolean"` - // The name of the CodeBuild build project to start running a build. + // The name of the AWS CodeBuild build project to start running a build. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` @@ -13014,7 +13106,7 @@ type StartBuildInput struct { // with the source provider must have write access to the repo. If the user // does not have write access, the build status cannot be updated. For more // information, see Source provider access (https://docs.aws.amazon.com/codebuild/latest/userguide/access-tokens.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. // // The status of a build triggered by a webhook is always reported to your source // provider. @@ -13051,7 +13143,7 @@ type StartBuildInput struct { // the latest version is used. If specified, the contents depends on the source // provider: // - // CodeCommit + // AWS CodeCommit // // The commit ID, branch, or Git tag to use. // @@ -13079,7 +13171,7 @@ type StartBuildInput struct { // (at the build level) takes precedence. // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // The number of build timeout minutes, from 5 to 480 (8 hours), that overrides, @@ -13537,8 +13629,7 @@ func (s *StopBuildOutput) SetBuild(v *Build) *StopBuildOutput { // A tag, consisting of a key and a value. // -// This tag is available for use by Amazon Web Services services that support -// tags in CodeBuild. +// This tag is available for use by AWS services that support tags in AWS CodeBuild. 
type Tag struct { _ struct{} `type:"structure"` @@ -13803,8 +13894,8 @@ type UpdateProjectInput struct { // A new or replacement description of the build project. Description *string `locationName:"description" type:"string"` - // The Key Management Service customer master key (CMK) to be used for encrypting - // the build output artifacts. + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. // // You can use a cross-account KMS key to encrypt the build output artifacts // if your service role has permission to that key. @@ -13822,7 +13913,7 @@ type UpdateProjectInput struct { FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` // Information about logs for the build project. A project can create logs in - // CloudWatch Logs, logs in an S3 bucket, or both. + // Amazon CloudWatch Logs, logs in an S3 bucket, or both. LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` // The name of the build project. @@ -13846,9 +13937,9 @@ type UpdateProjectInput struct { // An array of ProjectSource objects. SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` - // The replacement ARN of the Identity and Access Management role that enables - // CodeBuild to interact with dependent Amazon Web Services services on behalf - // of the Amazon Web Services account. + // The replacement ARN of the AWS Identity and Access Management (IAM) role + // that enables AWS CodeBuild to interact with dependent AWS services on behalf + // of the AWS account. ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` // Information to be changed about the build input source code for the build @@ -13858,7 +13949,7 @@ type UpdateProjectInput struct { // A version of the build input to be built for this project. If not specified, // the latest version is used. 
If specified, it must be one of: // - // * For CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // // * For GitHub: the commit ID, pull request ID, branch name, or tag name // that corresponds to the version of the source code you want to build. @@ -13879,20 +13970,20 @@ type UpdateProjectInput struct { // precedence over this sourceVersion (at the project level). // // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the CodeBuild User Guide. + // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` // An updated list of tag key and value pairs associated with this build project. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild build project tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. Tags []*Tag `locationName:"tags" type:"list"` - // The replacement value in minutes, from 5 to 480 (8 hours), for CodeBuild + // The replacement value in minutes, from 5 to 480 (8 hours), for AWS CodeBuild // to wait before timing out any related build that did not get marked as completed. TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - // VpcConfig enables CodeBuild to access resources in an Amazon VPC. + // VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC. VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` } @@ -14175,8 +14266,8 @@ type UpdateReportGroupInput struct { // An updated list of tag key and value pairs associated with this report group. // - // These tags are available for use by Amazon Web Services services that support - // CodeBuild report group tags. + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. 
Tags []*Tag `locationName:"tags" type:"list"` } @@ -14281,7 +14372,7 @@ type UpdateWebhookInput struct { // WebhookFilter. FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` - // The name of the CodeBuild project. + // The name of the AWS CodeBuild project. // // ProjectName is a required field ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` @@ -14352,7 +14443,7 @@ type UpdateWebhookOutput struct { _ struct{} `type:"structure"` // Information about a repository's webhook that is associated with a project - // in CodeBuild. + // in AWS CodeBuild. Webhook *Webhook `locationName:"webhook" type:"structure"` } @@ -14372,7 +14463,7 @@ func (s *UpdateWebhookOutput) SetWebhook(v *Webhook) *UpdateWebhookOutput { return s } -// Information about the VPC configuration that CodeBuild accesses. +// Information about the VPC configuration that AWS CodeBuild accesses. type VpcConfig struct { _ struct{} `type:"structure"` @@ -14428,7 +14519,7 @@ func (s *VpcConfig) SetVpcId(v string) *VpcConfig { } // Information about a webhook that connects repository events to a build project -// in CodeBuild. +// in AWS CodeBuild. type Webhook struct { _ struct{} `type:"structure"` @@ -14455,7 +14546,7 @@ type Webhook struct { // modified. LastModifiedSecret *time.Time `locationName:"lastModifiedSecret" type:"timestamp"` - // The CodeBuild endpoint where webhook events are sent. + // The AWS CodeBuild endpoint where webhook events are sent. PayloadUrl *string `locationName:"payloadUrl" min:"1" type:"string"` // The secret token of the associated repository. @@ -14697,6 +14788,56 @@ func AuthType_Values() []string { } } +// Specifies the access for objects that are uploaded to an Amazon S3 bucket +// that is owned by another account. +// +// By default, only the account that uploads the objects to the bucket has access +// to these objects. This property allows you to give the bucket owner access +// to these objects. 
+// +// NONE +// +// The bucket owner does not have access to the objects. This is the default. +// +// READ_ONLY +// +// The bucket owner has read only access to the objects. The uploading account +// retains ownership of the objects. +// +// FULL +// +// The bucket owner has full access to the objects. Object ownership is determined +// by the following criteria: +// +// * If the bucket is configured with the Bucket owner preferred setting, +// the bucket owner owns the objects. The uploading account will have object +// access as specified by the bucket's policy. +// +// * Otherwise, the uploading account retains ownership of the objects. +// +// For more information about Amazon S3 object ownership, see Controlling ownership +// of uploaded objects using S3 Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon Simple Storage Service User Guide. +const ( + // BucketOwnerAccessNone is a BucketOwnerAccess enum value + BucketOwnerAccessNone = "NONE" + + // BucketOwnerAccessReadOnly is a BucketOwnerAccess enum value + BucketOwnerAccessReadOnly = "READ_ONLY" + + // BucketOwnerAccessFull is a BucketOwnerAccess enum value + BucketOwnerAccessFull = "FULL" +) + +// BucketOwnerAccess_Values returns all elements of the BucketOwnerAccess enum +func BucketOwnerAccess_Values() []string { + return []string{ + BucketOwnerAccessNone, + BucketOwnerAccessReadOnly, + BucketOwnerAccessFull, + } +} + const ( // BuildBatchPhaseTypeSubmitted is a BuildBatchPhaseType enum value BuildBatchPhaseTypeSubmitted = "SUBMITTED" diff --git a/service/codebuild/doc.go b/service/codebuild/doc.go index 75ec93ed80..fe25ba1d2f 100644 --- a/service/codebuild/doc.go +++ b/service/codebuild/doc.go @@ -3,15 +3,16 @@ // Package codebuild provides the client and types for making API // requests to AWS CodeBuild. // -// CodeBuild is a fully managed build service in the cloud. 
CodeBuild compiles -// your source code, runs unit tests, and produces artifacts that are ready -// to deploy. CodeBuild eliminates the need to provision, manage, and scale -// your own build servers. It provides prepackaged build environments for the -// most popular programming languages and build tools, such as Apache Maven, -// Gradle, and more. You can also fully customize build environments in CodeBuild -// to use your own build tools. CodeBuild scales automatically to meet peak -// build requests. You pay only for the build time you consume. For more information -// about CodeBuild, see the CodeBuild User Guide (https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html). +// AWS CodeBuild is a fully managed build service in the cloud. AWS CodeBuild +// compiles your source code, runs unit tests, and produces artifacts that are +// ready to deploy. AWS CodeBuild eliminates the need to provision, manage, +// and scale your own build servers. It provides prepackaged build environments +// for the most popular programming languages and build tools, such as Apache +// Maven, Gradle, and more. You can also fully customize build environments +// in AWS CodeBuild to use your own build tools. AWS CodeBuild scales automatically +// to meet peak build requests. You pay only for the build time you consume. +// For more information about AWS CodeBuild, see the AWS CodeBuild User Guide +// (https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html). // // See https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06 for more information on this service. // diff --git a/service/codebuild/errors.go b/service/codebuild/errors.go index b727d90d5f..1f4a4d3b92 100644 --- a/service/codebuild/errors.go +++ b/service/codebuild/errors.go @@ -11,8 +11,7 @@ const ( // ErrCodeAccountLimitExceededException for service response error code // "AccountLimitExceededException". 
// - // An Amazon Web Services service limit was exceeded for the calling Amazon - // Web Services account. + // An AWS service limit was exceeded for the calling AWS account. ErrCodeAccountLimitExceededException = "AccountLimitExceededException" // ErrCodeInvalidInputException for service response error code @@ -30,14 +29,14 @@ const ( // ErrCodeResourceAlreadyExistsException for service response error code // "ResourceAlreadyExistsException". // - // The specified Amazon Web Services resource cannot be created, because an - // Amazon Web Services resource with the same settings already exists. + // The specified AWS resource cannot be created, because an AWS resource with + // the same settings already exists. ErrCodeResourceAlreadyExistsException = "ResourceAlreadyExistsException" // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // The specified Amazon Web Services resource cannot be found. + // The specified AWS resource cannot be found. ErrCodeResourceNotFoundException = "ResourceNotFoundException" ) diff --git a/service/elbv2/api.go b/service/elbv2/api.go index 7fc722fb07..babd65433a 100644 --- a/service/elbv2/api.go +++ b/service/elbv2/api.go @@ -6761,6 +6761,15 @@ type LoadBalancerAttribute struct { // HTTP headers with invalid header fields are removed by the load balancer // (true) or routed to targets (false). The default is false. // + // * Indicates whether the two headers (x-amzn-tls-version and x-amzn-tls-cipher-suite), + // which contain information about the negotiated TLS version and cipher + // suite, are added to the client request before sending it to the target. + // The x-amzn-tls-version header has information about the TLS protocol version + // negotiated with the client, and the x-amzn-tls-cipher-suite header has + // information about the cipher suite negotiated with the client. Both headers + // are in OpenSSL format. The possible values for the attribute are true + // and false. 
The default is false. + // // * routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value // is true or false. The default is true. Elastic Load Balancing requires // that message header names contain only alphanumeric characters and hyphens. @@ -7298,12 +7307,9 @@ type ModifyTargetGroupInput struct { HealthCheckPort *string `type:"string"` // The protocol the load balancer uses when performing health checks on targets. - // For Application Load Balancers, the default is HTTP. For Network Load Balancers - // and Gateway Load Balancers, the default is TCP. The TCP protocol is not supported - // for health checks if the protocol of the target group is HTTP or HTTPS. It - // is supported for health checks only if the protocol of the target group is - // TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP protocols are - // not supported for health checks. + // The TCP protocol is supported for health checks only if the protocol of the + // target group is TCP, TLS, UDP, or TCP_UDP. The GENEVE, TLS, UDP, and TCP_UDP + // protocols are not supported for health checks. // // With Network Load Balancers, you can't modify this setting. HealthCheckProtocol *string `type:"string" enum:"ProtocolEnum"` diff --git a/service/emr/api.go b/service/emr/api.go index 5ea540fcd6..03b2781080 100644 --- a/service/emr/api.go +++ b/service/emr/api.go @@ -415,7 +415,9 @@ func (c *EMR) CancelStepsRequest(input *CancelStepsInput) (req *request.Request, // EMR versions 4.8.0 and later, excluding version 5.0.0. A maximum of 256 steps // are allowed in each CancelSteps request. CancelSteps is idempotent but asynchronous; // it does not guarantee that a step will be canceled, even if the request is -// successfully submitted. You can only cancel steps that are in a PENDING state. +// successfully submitted. When you use Amazon EMR versions 5.28.0 and later, +// you can cancel steps that are in a PENDING or RUNNING state. 
In earlier versions +// of Amazon EMR, you can only cancel steps that are in a PENDING state. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1233,6 +1235,92 @@ func (c *EMR) DescribeNotebookExecutionWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeReleaseLabel = "DescribeReleaseLabel" + +// DescribeReleaseLabelRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReleaseLabel operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReleaseLabel for more information on using the DescribeReleaseLabel +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReleaseLabelRequest method. +// req, resp := client.DescribeReleaseLabelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeReleaseLabel +func (c *EMR) DescribeReleaseLabelRequest(input *DescribeReleaseLabelInput) (req *request.Request, output *DescribeReleaseLabelOutput) { + op := &request.Operation{ + Name: opDescribeReleaseLabel, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReleaseLabelInput{} + } + + output = &DescribeReleaseLabelOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReleaseLabel API operation for Amazon Elastic MapReduce. 
+// +// Provides EMR release label details, such as releases available the region +// where the API request is run, and the available applications for a specific +// EMR release label. Can also list EMR release versions that support a specified +// version of Spark. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation DescribeReleaseLabel for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// This exception occurs when there is an internal failure in the Amazon EMR +// service. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeReleaseLabel +func (c *EMR) DescribeReleaseLabel(input *DescribeReleaseLabelInput) (*DescribeReleaseLabelOutput, error) { + req, out := c.DescribeReleaseLabelRequest(input) + return out, req.Send() +} + +// DescribeReleaseLabelWithContext is the same as DescribeReleaseLabel with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReleaseLabel for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) DescribeReleaseLabelWithContext(ctx aws.Context, input *DescribeReleaseLabelInput, opts ...request.Option) (*DescribeReleaseLabelOutput, error) { + req, out := c.DescribeReleaseLabelRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeSecurityConfiguration = "DescribeSecurityConfiguration" // DescribeSecurityConfigurationRequest generates a "aws/request.Request" representing the @@ -1528,7 +1616,7 @@ func (c *EMR) GetBlockPublicAccessConfigurationRequest(input *GetBlockPublicAcce // GetBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. // -// Returns the Amazon EMR block public access configuration for your AWS account +// Returns the Amazon EMR block public access configuration for your account // in the current Region. For more information see Configure Block Public Access // for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) // in the Amazon EMR Management Guide. @@ -1919,11 +2007,11 @@ func (c *EMR) ListClustersRequest(input *ListClustersInput) (req *request.Reques // ListClusters API operation for Amazon Elastic MapReduce. // -// Provides the status of all clusters visible to this AWS account. Allows you -// to filter the list of clusters based on certain criteria; for example, filtering +// Provides the status of all clusters visible to this account. Allows you to +// filter the list of clusters based on certain criteria; for example, filtering // by cluster creation date and time or by status. This call returns a maximum -// of 50 clusters per call, but returns a marker to track the paging of the -// cluster list across multiple ListClusters calls. +// of 50 clusters in unsorted order per call, but returns a marker to track +// the paging of the cluster list across multiple ListClusters calls. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2587,6 +2675,147 @@ func (c *EMR) ListNotebookExecutionsPagesWithContext(ctx aws.Context, input *Lis return p.Err() } +const opListReleaseLabels = "ListReleaseLabels" + +// ListReleaseLabelsRequest generates a "aws/request.Request" representing the +// client's request for the ListReleaseLabels operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListReleaseLabels for more information on using the ListReleaseLabels +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListReleaseLabelsRequest method. +// req, resp := client.ListReleaseLabelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/ListReleaseLabels +func (c *EMR) ListReleaseLabelsRequest(input *ListReleaseLabelsInput) (req *request.Request, output *ListReleaseLabelsOutput) { + op := &request.Operation{ + Name: opListReleaseLabels, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListReleaseLabelsInput{} + } + + output = &ListReleaseLabelsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListReleaseLabels API operation for Amazon Elastic MapReduce. 
+// +// Retrieves release labels of EMR services in the region where the API is called. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation ListReleaseLabels for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// This exception occurs when there is an internal failure in the Amazon EMR +// service. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/ListReleaseLabels +func (c *EMR) ListReleaseLabels(input *ListReleaseLabelsInput) (*ListReleaseLabelsOutput, error) { + req, out := c.ListReleaseLabelsRequest(input) + return out, req.Send() +} + +// ListReleaseLabelsWithContext is the same as ListReleaseLabels with the addition of +// the ability to pass a context and additional request options. +// +// See ListReleaseLabels for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) ListReleaseLabelsWithContext(ctx aws.Context, input *ListReleaseLabelsInput, opts ...request.Option) (*ListReleaseLabelsOutput, error) { + req, out := c.ListReleaseLabelsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListReleaseLabelsPages iterates over the pages of a ListReleaseLabels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListReleaseLabels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListReleaseLabels operation. +// pageNum := 0 +// err := client.ListReleaseLabelsPages(params, +// func(page *emr.ListReleaseLabelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListReleaseLabelsPages(input *ListReleaseLabelsInput, fn func(*ListReleaseLabelsOutput, bool) bool) error { + return c.ListReleaseLabelsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListReleaseLabelsPagesWithContext same as ListReleaseLabelsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) ListReleaseLabelsPagesWithContext(ctx aws.Context, input *ListReleaseLabelsInput, fn func(*ListReleaseLabelsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReleaseLabelsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReleaseLabelsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReleaseLabelsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSecurityConfigurations = "ListSecurityConfigurations" // ListSecurityConfigurationsRequest generates a "aws/request.Request" representing the @@ -2782,8 +3011,10 @@ func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, out // ListSteps API operation for Amazon Elastic MapReduce. 
// // Provides a list of steps for the cluster in reverse order unless you specify -// stepIds with the request of filter by StepStates. You can specify a maximum -// of 10 stepIDs. +// stepIds with the request or filter by StepStates. You can specify a maximum +// of 10 stepIDs. The CLI automatically paginates results to return a list greater +// than 50 steps. To return more than 50 steps using the CLI, specify a Marker, +// which is a pagination token that indicates the next set of steps to retrieve. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3066,9 +3297,9 @@ func (c *EMR) ListStudiosRequest(input *ListStudiosInput) (req *request.Request, // ListStudios API operation for Amazon Elastic MapReduce. // -// Returns a list of all Amazon EMR Studios associated with the AWS account. -// The list includes details such as ID, Studio Access URL, and creation time -// for each Studio. +// Returns a list of all Amazon EMR Studios associated with the account. The +// list includes details such as ID, Studio Access URL, and creation time for +// each Studio. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3539,8 +3770,8 @@ func (c *EMR) PutBlockPublicAccessConfigurationRequest(input *PutBlockPublicAcce // PutBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. // // Creates or updates an Amazon EMR block public access configuration for your -// AWS account in the current Region. For more information see Configure Block -// Public Access for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) +// account in the current Region. 
For more information see Configure Block Public +// Access for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) // in the Amazon EMR Management Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4145,15 +4376,18 @@ func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req // SetVisibleToAllUsers API operation for Amazon Elastic MapReduce. // -// Sets the Cluster$VisibleToAllUsers value, which determines whether the cluster -// is visible to all IAM users of the AWS account associated with the cluster. -// Only the IAM user who created the cluster or the AWS account root user can -// call this action. The default value, true, indicates that all IAM users in -// the AWS account can perform cluster actions if they have the proper IAM policy -// permissions. If set to false, only the IAM user that created the cluster -// can perform actions. This action works on running clusters. You can override -// the default true setting when you create a cluster by using the VisibleToAllUsers -// parameter with RunJobFlow. +// Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true, IAM +// principals in the account can perform EMR cluster actions that their IAM +// policies allow. When false, only the IAM principal that created the cluster +// and the account root user can perform EMR actions on the cluster, regardless +// of IAM permissions policies attached to other IAM principals. +// +// This action works on running clusters. When you create a cluster, use the +// RunJobFlowInput$VisibleToAllUsers parameter. +// +// For more information, see Understanding the EMR Cluster VisibleToAllUsers +// Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) +// in the Amazon EMR Management Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5302,10 +5536,11 @@ func (s *BlockPublicAccessConfiguration) SetPermittedPublicSecurityGroupRuleRang return s } -// Properties that describe the AWS principal that created the BlockPublicAccessConfiguration -// using the PutBlockPublicAccessConfiguration action as well as the date and -// time that the configuration was created. Each time a configuration for block -// public access is updated, Amazon EMR updates this metadata. +// Properties that describe the Amazon Web Services principal that created the +// BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration +// action as well as the date and time that the configuration was created. Each +// time a configuration for block public access is updated, Amazon EMR updates +// this metadata. type BlockPublicAccessConfigurationMetadata struct { _ struct{} `type:"structure"` @@ -5744,9 +5979,8 @@ type Cluster struct { // in the Amazon EMR Management Guide. KerberosAttributes *KerberosAttributes `type:"structure"` - // The AWS KMS customer master key (CMK) used for encrypting log files. This - // attribute is only available with EMR version 5.30.0 and later, excluding - // EMR 6.0.0. + // The KMS key used for encrypting log files. This attribute is only available + // with EMR version 5.30.0 and later, excluding EMR 6.0.0. LogEncryptionKmsKeyId *string `type:"string"` // The path to the Amazon S3 location where logs for this cluster are stored. @@ -5811,8 +6045,8 @@ type Cluster struct { // The name of the security configuration applied to the cluster. SecurityConfiguration *string `type:"string"` - // The IAM role that will be assumed by the Amazon EMR service to access AWS - // resources on your behalf. + // The IAM role that will be assumed by the Amazon EMR service to access Amazon + // Web Services resources on your behalf. 
ServiceRole *string `type:"string"` // The current status details about the cluster. @@ -5829,14 +6063,21 @@ type Cluster struct { // of a cluster error. TerminationProtected *bool `type:"boolean"` - // Indicates whether the cluster is visible to all IAM users of the AWS account - // associated with the cluster. The default value, true, indicates that all - // IAM users in the AWS account can perform cluster actions if they have the - // proper IAM policy permissions. If this value is false, only the IAM user - // that created the cluster can perform actions. This value can be changed on - // a running cluster by using the SetVisibleToAllUsers action. You can override - // the default value of true when you create a cluster by using the VisibleToAllUsers - // parameter of the RunJobFlow action. + // Indicates whether the cluster is visible to IAM principals in the account + // associated with the cluster. When true, IAM principals in the account can + // perform EMR cluster actions on the cluster that their IAM policies allow. + // When false, only the IAM principal that created the cluster and the account + // root user can perform EMR actions, regardless of IAM permissions policies + // attached to other IAM principals. + // + // The default value is false if a value is not provided when creating a cluster + // using the EMR API RunJobFlow command or the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // command. The default value is true when a cluster is created using the Management + // Console. IAM principals that are allowed to perform actions on the cluster + // can use the SetVisibleToAllUsers action to change the value on a running + // cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers + // Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) + // in the Amazon EMR Management Guide. 
VisibleToAllUsers *bool `type:"boolean"` } @@ -6539,7 +6780,8 @@ type CreateStudioInput struct { Name *string `type:"string" required:"true"` // The IAM role that will be assumed by the Amazon EMR Studio. The service role - // provides a way for Amazon EMR Studio to interoperate with other AWS services. + // provides a way for Amazon EMR Studio to interoperate with other Amazon Web + // Services services. // // ServiceRole is a required field ServiceRole *string `type:"string" required:"true"` @@ -6725,17 +6967,17 @@ func (s *CreateStudioOutput) SetUrl(v string) *CreateStudioOutput { type CreateStudioSessionMappingInput struct { _ struct{} `type:"structure"` - // The globally unique identifier (GUID) of the user or group from the AWS SSO - // Identity Store. For more information, see UserId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) + // The globally unique identifier (GUID) of the user or group from the Amazon + // Web Services SSO Identity Store. For more information, see UserId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) // and GroupId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityId *string `type:"string"` // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. 
Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityName *string `type:"string"` // Specifies whether the identity to map to the Amazon EMR Studio is a user @@ -6745,8 +6987,9 @@ type CreateStudioSessionMappingInput struct { IdentityType *string `type:"string" required:"true" enum:"IdentityType"` // The Amazon Resource Name (ARN) for the session policy that will be applied - // to the user or group. Session policies refine Studio user permissions without - // the need to use multiple IAM user roles. + // to the user or group. You should specify the ARN for the session policy that + // you want to apply, not the ARN of your user role. For more information, see + // Create an EMR Studio User Role with Session Policies (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-studio-user-role.html). // // SessionPolicyArn is a required field SessionPolicyArn *string `type:"string" required:"true"` @@ -6940,15 +7183,15 @@ type DeleteStudioSessionMappingInput struct { // The globally unique identifier (GUID) of the user or group to remove from // the Amazon EMR Studio. For more information, see UserId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) // and GroupId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityId *string `type:"string"` // The name of the user name or group to remove from the Amazon EMR Studio. 
// For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Store API Reference. Either IdentityName or + // IdentityId must be specified. IdentityName *string `type:"string"` // Specifies whether the identity to delete from the Amazon EMR Studio is a @@ -7226,6 +7469,102 @@ func (s *DescribeNotebookExecutionOutput) SetNotebookExecution(v *NotebookExecut return s } +type DescribeReleaseLabelInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. Currently set to null. + MaxResults *int64 `min:"1" type:"integer"` + + // The pagination token. Reserved for future use. Currently set to null. + NextToken *string `type:"string"` + + // The target release label to be described. + ReleaseLabel *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReleaseLabelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReleaseLabelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReleaseLabelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReleaseLabelInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeReleaseLabelInput) SetMaxResults(v int64) *DescribeReleaseLabelInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReleaseLabelInput) SetNextToken(v string) *DescribeReleaseLabelInput { + s.NextToken = &v + return s +} + +// SetReleaseLabel sets the ReleaseLabel field's value. +func (s *DescribeReleaseLabelInput) SetReleaseLabel(v string) *DescribeReleaseLabelInput { + s.ReleaseLabel = &v + return s +} + +type DescribeReleaseLabelOutput struct { + _ struct{} `type:"structure"` + + // The list of applications available for the target release label. Name is + // the name of the application. Version is the concise version of the application. + Applications []*SimplifiedApplication `type:"list"` + + // The pagination token. Reserved for future use. Currently set to null. + NextToken *string `type:"string"` + + // The target release label described in the response. + ReleaseLabel *string `type:"string"` +} + +// String returns the string representation +func (s DescribeReleaseLabelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReleaseLabelOutput) GoString() string { + return s.String() +} + +// SetApplications sets the Applications field's value. +func (s *DescribeReleaseLabelOutput) SetApplications(v []*SimplifiedApplication) *DescribeReleaseLabelOutput { + s.Applications = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeReleaseLabelOutput) SetNextToken(v string) *DescribeReleaseLabelOutput { + s.NextToken = &v + return s +} + +// SetReleaseLabel sets the ReleaseLabel field's value. 
+func (s *DescribeReleaseLabelOutput) SetReleaseLabel(v string) *DescribeReleaseLabelOutput { + s.ReleaseLabel = &v + return s +} + type DescribeSecurityConfigurationInput struct { _ struct{} `type:"structure"` @@ -7903,10 +8242,11 @@ type GetBlockPublicAccessConfigurationOutput struct { // BlockPublicAccessConfiguration is a required field BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` - // Properties that describe the AWS principal that created the BlockPublicAccessConfiguration - // using the PutBlockPublicAccessConfiguration action as well as the date and - // time that the configuration was created. Each time a configuration for block - // public access is updated, Amazon EMR updates this metadata. + // Properties that describe the Amazon Web Services principal that created the + // BlockPublicAccessConfiguration using the PutBlockPublicAccessConfiguration + // action as well as the date and time that the configuration was created. Each + // time a configuration for block public access is updated, Amazon EMR updates + // this metadata. // // BlockPublicAccessConfigurationMetadata is a required field BlockPublicAccessConfigurationMetadata *BlockPublicAccessConfigurationMetadata `type:"structure" required:"true"` @@ -8002,15 +8342,15 @@ type GetStudioSessionMappingInput struct { // The globally unique identifier (GUID) of the user or group. For more information, // see UserId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) // and GroupId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. 
IdentityId *string `type:"string"` // The name of the user or group to fetch. For more information, see UserName // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityName *string `type:"string"` // Specifies whether the identity to fetch is a user or a group. @@ -8229,7 +8569,7 @@ func (s *HadoopStepConfig) SetProperties(v map[string]*string) *HadoopStepConfig type Instance struct { _ struct{} `type:"structure"` - // The list of EBS volumes that are attached to this instance. + // The list of Amazon EBS volumes that are attached to this instance. EbsVolumes []*EbsVolume `type:"list"` // The unique identifier of the instance in Amazon EC2. @@ -8365,8 +8705,7 @@ type InstanceFleet struct { // or TASK. InstanceFleetType *string `type:"string" enum:"InstanceFleetType"` - // The specification for the instance types that comprise an instance fleet. - // Up to five unique instance specifications may be defined for each instance + // An array of specifications for the instance types that comprise an instance // fleet. InstanceTypeSpecifications []*InstanceTypeSpecification `type:"list"` @@ -8925,9 +9264,9 @@ type InstanceGroup struct { // // Amazon EMR releases 4.x or later. // - // The list of configurations supplied for an EMR cluster instance group. You - // can specify a separate configuration for each instance group (master, core, - // and task). + // The list of configurations supplied for an Amazon EMR cluster instance group. 
+ // You can specify a separate configuration for each instance group (master, + // core, and task). Configurations []*Configuration `type:"list"` // The version number of the requested configuration specification for this @@ -9750,8 +10089,12 @@ func (s *InstanceTimeline) SetReadyDateTime(v time.Time) *InstanceTimeline { // An instance type configuration for each instance type in an instance fleet, // which determines the EC2 instances Amazon EMR attempts to provision to fulfill -// On-Demand and Spot target capacities. There can be a maximum of five instance -// type configurations in a fleet. +// On-Demand and Spot target capacities. When you use an allocation strategy, +// you can include a maximum of 30 instance type configurations for a fleet. +// For more information about how to use an allocation strategy, see Configure +// Instance Fleets (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-fleet.html). +// Without an allocation strategy, you may specify a maximum of five instance +// type configurations for a fleet. // // The instance fleet configuration is available only in Amazon EMR versions // 4.8.0 and later, excluding 5.0.x versions. @@ -9774,7 +10117,7 @@ type InstanceTypeConfig struct { // the cluster. Configurations []*Configuration `type:"list"` - // The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to + // The configuration of Amazon Elastic Block Store (Amazon EBS) attached to // each instance as defined by InstanceType. EbsConfiguration *EbsConfiguration `type:"structure"` @@ -9878,7 +10221,7 @@ type InstanceTypeSpecification struct { // Amazon EMR. Configurations []*Configuration `type:"list"` - // The configuration of Amazon Elastic Block Storage (Amazon EBS) attached to + // The configuration of Amazon Elastic Block Store (Amazon EBS) attached to // each instance as defined by InstanceType. 
EbsBlockDevices []*EbsBlockDevice `type:"list"` @@ -10158,9 +10501,8 @@ type JobFlowDetail struct { // of the job flow assume this role. JobFlowRole *string `type:"string"` - // The AWS KMS customer master key (CMK) used for encrypting log files. This - // attribute is only available with EMR version 5.30.0 and later, excluding - // EMR 6.0.0. + // The KMS key used for encrypting log files. This attribute is only available + // with EMR version 5.30.0 and later, excluding EMR 6.0.0. LogEncryptionKmsKeyId *string `type:"string"` // The location in Amazon S3 where log files for the job are stored. @@ -10185,8 +10527,8 @@ type JobFlowDetail struct { // for versions of Amazon EMR earlier than 5.1.0. ScaleDownBehavior *string `type:"string" enum:"ScaleDownBehavior"` - // The IAM role that is assumed by the Amazon EMR service to access AWS resources - // on your behalf. + // The IAM role that is assumed by the Amazon EMR service to access Amazon Web + // Services resources on your behalf. ServiceRole *string `type:"string"` // A list of steps run by the job flow. @@ -10197,14 +10539,21 @@ type JobFlowDetail struct { // is empty. SupportedProducts []*string `type:"list"` - // Indicates whether the cluster is visible to all IAM users of the AWS account - // associated with the cluster. The default value, true, indicates that all - // IAM users in the AWS account can perform cluster actions if they have the - // proper IAM policy permissions. If this value is false, only the IAM user - // that created the cluster can perform actions. This value can be changed on - // a running cluster by using the SetVisibleToAllUsers action. You can override - // the default value of true when you create a cluster by using the VisibleToAllUsers - // parameter of the RunJobFlow action. + // Indicates whether the cluster is visible to IAM principals in the account + // associated with the cluster. 
When true, IAM principals in the account can + // perform EMR cluster actions that their IAM policies allow. When false, only + // the IAM principal that created the cluster and the account root user can + // perform EMR actions, regardless of IAM permissions policies attached to other + // IAM principals. + // + // The default value is false if a value is not provided when creating a cluster + // using the EMR API RunJobFlow command or the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // command. The default value is true when a cluster is created using the Management + // Console. IAM principals that are authorized to perform actions on the cluster + // can use the SetVisibleToAllUsers action to change the value on a running + // cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers + // Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) + // in the Amazon EMR Management Guide. VisibleToAllUsers *bool `type:"boolean"` } @@ -10416,10 +10765,12 @@ type JobFlowInstancesConfig struct { // 4.8.0 and later, excluding 5.0.x versions. Ec2SubnetIds []*string `type:"list"` - // The identifier of the Amazon EC2 security group for the master node. + // The identifier of the Amazon EC2 security group for the master node. If you + // specify EmrManagedMasterSecurityGroup, you must also specify EmrManagedSlaveSecurityGroup. EmrManagedMasterSecurityGroup *string `type:"string"` // The identifier of the Amazon EC2 security group for the core and task nodes. + // If you specify EmrManagedSlaveSecurityGroup, you must also specify EmrManagedMasterSecurityGroup. EmrManagedSlaveSecurityGroup *string `type:"string"` // Applies only to Amazon EMR release versions earlier than 4.0. 
The Hadoop @@ -10445,7 +10796,9 @@ type JobFlowInstancesConfig struct { InstanceGroups []*InstanceGroupConfig `type:"list"` // Specifies whether the cluster should remain available after completing all - // steps. + // steps. Defaults to true. For more information about configuring cluster termination, + // see Control Cluster Termination (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) + // in the EMR Management Guide. KeepJobFlowAliveWhenNoSteps *bool `type:"boolean"` // The EC2 instance type of the master node. @@ -10976,7 +11329,9 @@ func (s *ListBootstrapActionsOutput) SetMarker(v string) *ListBootstrapActionsOu type ListClustersInput struct { _ struct{} `type:"structure"` - // The cluster state filters to apply when listing clusters. + // The cluster state filters to apply when listing clusters. Clusters that change + // state while this action runs may be not be returned as expected in the list + // of clusters. ClusterStates []*string `type:"list"` // The creation date and time beginning value filter for listing clusters. @@ -11464,6 +11819,101 @@ func (s *ListNotebookExecutionsOutput) SetNotebookExecutions(v []*NotebookExecut return s } +type ListReleaseLabelsInput struct { + _ struct{} `type:"structure"` + + // Filters the results of the request. Prefix specifies the prefix of release + // labels to return. Application specifies the application (with/without version) + // of release labels to return. + Filters *ReleaseLabelFilter `type:"structure"` + + // Defines the maximum number of release labels to return in a single response. + // The default is 100. + MaxResults *int64 `min:"1" type:"integer"` + + // Specifies the next page of results. If NextToken is not specified, which + // is usually the case for the first request of ListReleaseLabels, the first + // page of results are determined by other filtering parameters or by the latest + // version. 
The ListReleaseLabels request fails if the identity (AWS AccountID) + // and all filtering parameters are different from the original request, or + // if the NextToken is expired or tampered with. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListReleaseLabelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReleaseLabelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListReleaseLabelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReleaseLabelsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListReleaseLabelsInput) SetFilters(v *ReleaseLabelFilter) *ListReleaseLabelsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListReleaseLabelsInput) SetMaxResults(v int64) *ListReleaseLabelsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReleaseLabelsInput) SetNextToken(v string) *ListReleaseLabelsInput { + s.NextToken = &v + return s +} + +type ListReleaseLabelsOutput struct { + _ struct{} `type:"structure"` + + // Used to paginate the next page of results if specified in the next ListReleaseLabels + // request. + NextToken *string `type:"string"` + + // The returned release labels. 
+ ReleaseLabels []*string `type:"list"` +} + +// String returns the string representation +func (s ListReleaseLabelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReleaseLabelsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReleaseLabelsOutput) SetNextToken(v string) *ListReleaseLabelsOutput { + s.NextToken = &v + return s +} + +// SetReleaseLabels sets the ReleaseLabels field's value. +func (s *ListReleaseLabelsOutput) SetReleaseLabels(v []*string) *ListReleaseLabelsOutput { + s.ReleaseLabels = v + return s +} + type ListSecurityConfigurationsInput struct { _ struct{} `type:"structure"` @@ -11530,7 +11980,10 @@ type ListStepsInput struct { // ClusterId is a required field ClusterId *string `type:"string" required:"true"` - // The pagination token that indicates the next set of results to retrieve. + // The maximum number of steps that a single ListSteps action returns is 50. + // To return a longer list of steps, use multiple ListSteps actions along with + // the Marker parameter, which is a pagination token that indicates the next + // set of results to retrieve. Marker *string `type:"string"` // The filter to limit the step list based on the identifier of the steps. You @@ -11594,7 +12047,10 @@ func (s *ListStepsInput) SetStepStates(v []*string) *ListStepsInput { type ListStepsOutput struct { _ struct{} `type:"structure"` - // The pagination token that indicates the next set of results to retrieve. + // The maximum number of steps that a single ListSteps action returns is 50. + // To return a longer list of steps, use multiple ListSteps actions along with + // the Marker parameter, which is a pagination token that indicates the next + // set of results to retrieve. Marker *string `type:"string"` // The filtered list of steps for the cluster. 
@@ -11846,7 +12302,9 @@ type ModifyClusterInput struct { ClusterId *string `type:"string" required:"true"` // The number of steps that can be executed concurrently. You can specify a - // minimum of 1 step and a maximum of 256 steps. + // minimum of 1 step and a maximum of 256 steps. We recommend that you do not + // change this parameter while steps are running or the ActionOnFailure setting + // may not behave as expected. For more information see Step$ActionOnFailure. StepConcurrencyLevel *int64 `type:"integer"` } @@ -11916,7 +12374,7 @@ type ModifyInstanceFleetInput struct { // ClusterId is a required field ClusterId *string `type:"string" required:"true"` - // The unique identifier of the instance fleet. + // The configuration parameters of the instance fleet. // // InstanceFleet is a required field InstanceFleet *InstanceFleetModifyConfig `type:"structure" required:"true"` @@ -12211,6 +12669,8 @@ func (s *NotebookExecution) SetTags(v []*Tag) *NotebookExecution { return s } +// Details for a notebook execution. The details include information such as +// the unique ID and status of the notebook execution. type NotebookExecutionSummary struct { _ struct{} `type:"structure"` @@ -12333,8 +12793,8 @@ type OnDemandCapacityReservationOptions struct { // target capacity is launched according to the On-Demand allocation strategy // (lowest-price). // - // If you do not specify a value, the fleet fulfils the On-Demand capacity according - // to the chosen On-Demand allocation strategy. + // If you do not specify a value, the fleet fulfills the On-Demand capacity + // according to the chosen On-Demand allocation strategy. UsageStrategy *string `type:"string" enum:"OnDemandCapacityReservationUsageStrategy"` } @@ -12844,6 +13304,39 @@ func (s PutManagedScalingPolicyOutput) GoString() string { return s.String() } +// The release label filters by application or version prefix. 
+type ReleaseLabelFilter struct { + _ struct{} `type:"structure"` + + // Optional release label application filter. For example, spark@2.1.0. + Application *string `type:"string"` + + // Optional release label version prefix filter. For example, emr-5. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ReleaseLabelFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReleaseLabelFilter) GoString() string { + return s.String() +} + +// SetApplication sets the Application field's value. +func (s *ReleaseLabelFilter) SetApplication(v string) *ReleaseLabelFilter { + s.Application = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReleaseLabelFilter) SetPrefix(v string) *ReleaseLabelFilter { + s.Prefix = &v + return s +} + type RemoveAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -13097,10 +13590,9 @@ type RunJobFlowInput struct { // in the Amazon EMR Management Guide. KerberosAttributes *KerberosAttributes `type:"structure"` - // The AWS KMS customer master key (CMK) used for encrypting log files. If a - // value is not provided, the logs remain encrypted by AES-256. This attribute - // is only available with Amazon EMR version 5.30.0 and later, excluding Amazon - // EMR 6.0.0. + // The KMS key used for encrypting log files. If a value is not provided, the + // logs remain encrypted by AES-256. This attribute is only available with Amazon + // EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0. LogEncryptionKmsKeyId *string `type:"string"` // The location in Amazon S3 to write the log files of the job flow. If a value @@ -13181,8 +13673,8 @@ type RunJobFlowInput struct { // The name of a security configuration to apply to the cluster. SecurityConfiguration *string `type:"string"` - // The IAM role that will be assumed by the Amazon EMR service to access AWS - // resources on your behalf. 
+ // The IAM role that will be assumed by the Amazon EMR service to access Amazon + // Web Services resources on your behalf. ServiceRole *string `type:"string"` // Specifies the number of steps that can be executed concurrently. The default @@ -13208,10 +13700,17 @@ type RunJobFlowInput struct { // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags []*Tag `type:"list"` - // A value of true indicates that all IAM users in the AWS account can perform - // cluster actions if they have the proper IAM policy permissions. This is the - // default. A value of false indicates that only the IAM user who created the - // cluster can perform actions. + // Set this value to true so that IAM principals in the account associated with + // the cluster can perform EMR actions on the cluster that their IAM policies + // allow. This value defaults to false for clusters created using the EMR API + // or the CLI create-cluster (https://docs.aws.amazon.com/cli/latest/reference/emr/create-cluster.html) + // command. + // + // When set to false, only the IAM principal that created the cluster and the + // account root user can perform EMR actions for the cluster, regardless of + // the IAM permissions policies attached to other IAM principals. For more information, + // see Understanding the EMR Cluster VisibleToAllUsers Setting (https://docs.aws.amazon.com/emr/latest/ManagementGuide/security_iam_emr-with-iam.html#security_set_visible_to_all_users) + // in the Amazon EMR Management Guide. VisibleToAllUsers *bool `type:"boolean"` } @@ -13739,8 +14238,7 @@ type ScriptBootstrapActionConfig struct { // A list of command line arguments to pass to the bootstrap action script. Args []*string `type:"list"` - // Location of the script to run during a bootstrap action. Can be either a - // location in Amazon S3 or on a local file system. + // Location in Amazon S3 of the script to run during a bootstrap action. 
// // Path is a required field Path *string `type:"string" required:"true"` @@ -13827,7 +14325,7 @@ type SessionMappingDetail struct { // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. + // in the Amazon Web Services SSO Identity Store API Reference. IdentityName *string `type:"string"` // Specifies whether the identity mapped to the Amazon EMR Studio is a user @@ -13905,13 +14403,13 @@ type SessionMappingSummary struct { // The time the session mapping was created. CreationTime *time.Time `type:"timestamp"` - // The globally unique identifier (GUID) of the user or group from the AWS SSO - // Identity Store. + // The globally unique identifier (GUID) of the user or group from the Amazon + // Web Services SSO Identity Store. IdentityId *string `type:"string"` // The name of the user or group. For more information, see UserName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. + // in the Amazon Web Services SSO Identity Store API Reference. IdentityName *string `type:"string"` // Specifies whether the identity mapped to the Amazon EMR Studio is a user @@ -14051,10 +14549,11 @@ type SetVisibleToAllUsersInput struct { // JobFlowIds is a required field JobFlowIds []*string `type:"list" required:"true"` - // A value of true indicates that all IAM users in the AWS account can perform - // cluster actions if they have the proper IAM policy permissions. This is the - // default. 
A value of false indicates that only the IAM user who created the - // cluster can perform actions. + // A value of true indicates that an IAM principal in the account can perform + // EMR actions on the cluster that the IAM policies attached to the principal + // allow. A value of false indicates that only the IAM principal that created + // the cluster and the Amazon Web Services root user can perform EMR actions + // on the cluster. // // VisibleToAllUsers is a required field VisibleToAllUsers *bool `type:"boolean" required:"true"` @@ -14225,6 +14724,39 @@ func (s *SimpleScalingPolicyConfiguration) SetScalingAdjustment(v int64) *Simple return s } +// The returned release label application names or versions. +type SimplifiedApplication struct { + _ struct{} `type:"structure"` + + // The returned release label application name. For example, hadoop. + Name *string `type:"string"` + + // The returned release label application version. For example, 3.2.1. + Version *string `type:"string"` +} + +// String returns the string representation +func (s SimplifiedApplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimplifiedApplication) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *SimplifiedApplication) SetName(v string) *SimplifiedApplication { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *SimplifiedApplication) SetVersion(v string) *SimplifiedApplication { + s.Version = &v + return s +} + // The launch specification for Spot Instances in the instance fleet, which // determines the defined duration, provisioning timeout behavior, and allocation // strategy. @@ -14481,6 +15013,17 @@ type Step struct { // The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, // CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward // compatibility. 
We recommend using TERMINATE_CLUSTER instead. + // + // If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps + // to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. + // The step is not submitted and the action fails with a message that the ActionOnFailure + // setting is not valid. + // + // If you change a cluster's StepConcurrencyLevel to be greater than 1 while + // a step is running, the ActionOnFailure parameter may not behave as you expect. + // In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, + // pending steps and the running step are not canceled; for a step that fails + // with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate. ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` // The Hadoop job configuration of the cluster step. @@ -14536,13 +15079,32 @@ func (s *Step) SetStatus(v *StepStatus) *Step { return s } -// Specification of a cluster (job flow) step. +// Specification for a cluster (job flow) step. type StepConfig struct { _ struct{} `type:"structure"` - // The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, - // CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for backward - // compatibility. We recommend using TERMINATE_CLUSTER instead. + // The action to take when the step fails. Use one of the following values: + // + // * TERMINATE_CLUSTER - Shuts down the cluster. + // + // * CANCEL_AND_WAIT - Cancels any pending steps and returns the cluster + // to the WAITING state. + // + // * CONTINUE - Continues to the next step in the queue. + // + // * TERMINATE_JOB_FLOW - Shuts down the cluster. TERMINATE_JOB_FLOW is provided + // for backward compatibility. We recommend using TERMINATE_CLUSTER instead. 
+ // + // If a cluster's StepConcurrencyLevel is greater than 1, do not use AddJobFlowSteps + // to submit a step with this parameter set to CANCEL_AND_WAIT or TERMINATE_CLUSTER. + // The step is not submitted and the action fails with a message that the ActionOnFailure + // setting is not valid. + // + // If you change a cluster's StepConcurrencyLevel to be greater than 1 while + // a step is running, the ActionOnFailure parameter may not behave as you expect. + // In this case, for a step that fails with this parameter set to CANCEL_AND_WAIT, + // pending steps and the running step are not canceled; for a step that fails + // with this parameter set to TERMINATE_CLUSTER, the cluster does not terminate. ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` // The JAR file used for the step. @@ -14798,7 +15360,7 @@ type StepSummary struct { // The action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, // CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is available for backward - // compatibility. We recommend using TERMINATE_CLUSTER instead. + // compatibility. ActionOnFailure *string `type:"string" enum:"ActionOnFailure"` // The Hadoop job configuration of the cluster step. @@ -15402,15 +15964,15 @@ type UpdateStudioSessionMappingInput struct { // The globally unique identifier (GUID) of the user or group. For more information, // see UserId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserId) // and GroupId (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-GroupId) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityId *string `type:"string"` // The name of the user or group to update. 
For more information, see UserName // (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_User.html#singlesignon-Type-User-UserName) // and DisplayName (https://docs.aws.amazon.com/singlesignon/latest/IdentityStoreAPIReference/API_Group.html#singlesignon-Type-Group-DisplayName) - // in the AWS SSO Identity Store API Reference. Either IdentityName or IdentityId - // must be specified. + // in the Amazon Web Services SSO Identity Store API Reference. Either IdentityName + // or IdentityId must be specified. IdentityName *string `type:"string"` // Specifies whether the identity to update is a user or a group. diff --git a/service/emr/doc.go b/service/emr/doc.go index bc4a4d5e9e..41d9800960 100644 --- a/service/emr/doc.go +++ b/service/emr/doc.go @@ -5,8 +5,9 @@ // // Amazon EMR is a web service that makes it easier to process large amounts // of data efficiently. Amazon EMR uses Hadoop processing combined with several -// AWS services to do tasks such as web indexing, data mining, log file analysis, -// machine learning, scientific simulation, and data warehouse management. +// Amazon Web Services services to do tasks such as web indexing, data mining, +// log file analysis, machine learning, scientific simulation, and data warehouse +// management. // // See https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31 for more information on this service. 
// diff --git a/service/emr/emriface/interface.go b/service/emr/emriface/interface.go index c7a7a58066..4b076708b8 100644 --- a/service/emr/emriface/interface.go +++ b/service/emr/emriface/interface.go @@ -116,6 +116,10 @@ type EMRAPI interface { DescribeNotebookExecutionWithContext(aws.Context, *emr.DescribeNotebookExecutionInput, ...request.Option) (*emr.DescribeNotebookExecutionOutput, error) DescribeNotebookExecutionRequest(*emr.DescribeNotebookExecutionInput) (*request.Request, *emr.DescribeNotebookExecutionOutput) + DescribeReleaseLabel(*emr.DescribeReleaseLabelInput) (*emr.DescribeReleaseLabelOutput, error) + DescribeReleaseLabelWithContext(aws.Context, *emr.DescribeReleaseLabelInput, ...request.Option) (*emr.DescribeReleaseLabelOutput, error) + DescribeReleaseLabelRequest(*emr.DescribeReleaseLabelInput) (*request.Request, *emr.DescribeReleaseLabelOutput) + DescribeSecurityConfiguration(*emr.DescribeSecurityConfigurationInput) (*emr.DescribeSecurityConfigurationOutput, error) DescribeSecurityConfigurationWithContext(aws.Context, *emr.DescribeSecurityConfigurationInput, ...request.Option) (*emr.DescribeSecurityConfigurationOutput, error) DescribeSecurityConfigurationRequest(*emr.DescribeSecurityConfigurationInput) (*request.Request, *emr.DescribeSecurityConfigurationOutput) @@ -182,6 +186,13 @@ type EMRAPI interface { ListNotebookExecutionsPages(*emr.ListNotebookExecutionsInput, func(*emr.ListNotebookExecutionsOutput, bool) bool) error ListNotebookExecutionsPagesWithContext(aws.Context, *emr.ListNotebookExecutionsInput, func(*emr.ListNotebookExecutionsOutput, bool) bool, ...request.Option) error + ListReleaseLabels(*emr.ListReleaseLabelsInput) (*emr.ListReleaseLabelsOutput, error) + ListReleaseLabelsWithContext(aws.Context, *emr.ListReleaseLabelsInput, ...request.Option) (*emr.ListReleaseLabelsOutput, error) + ListReleaseLabelsRequest(*emr.ListReleaseLabelsInput) (*request.Request, *emr.ListReleaseLabelsOutput) + + 
ListReleaseLabelsPages(*emr.ListReleaseLabelsInput, func(*emr.ListReleaseLabelsOutput, bool) bool) error + ListReleaseLabelsPagesWithContext(aws.Context, *emr.ListReleaseLabelsInput, func(*emr.ListReleaseLabelsOutput, bool) bool, ...request.Option) error + ListSecurityConfigurations(*emr.ListSecurityConfigurationsInput) (*emr.ListSecurityConfigurationsOutput, error) ListSecurityConfigurationsWithContext(aws.Context, *emr.ListSecurityConfigurationsInput, ...request.Option) (*emr.ListSecurityConfigurationsOutput, error) ListSecurityConfigurationsRequest(*emr.ListSecurityConfigurationsInput) (*request.Request, *emr.ListSecurityConfigurationsOutput) diff --git a/service/iam/api.go b/service/iam/api.go index f3ed7f565d..9febb72df4 100644 --- a/service/iam/api.go +++ b/service/iam/api.go @@ -1343,6 +1343,13 @@ func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProvi // You get all of this information from the OIDC IdP that you want to use to // access Amazon Web Services. // +// Amazon Web Services secures communication with some OIDC identity providers +// (IdPs) through our library of trusted certificate authorities (CAs) instead +// of using a certificate thumbprint to verify your IdP server certificate. +// These OIDC IdPs include Google, and those that use an Amazon S3 bucket to +// host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint +// remains in your configuration, but is no longer used for validation. +// // The trust for the OIDC provider is derived from the IAM provider that this // operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider // operation to highly privileged users. @@ -16393,12 +16400,19 @@ func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDCo // The list that you pass with this operation completely replaces the existing // list of thumbprints. (The lists are not merged.) 
// -// Typically, you need to update a thumbprint only when the identity provider's +// Typically, you need to update a thumbprint only when the identity provider // certificate changes, which occurs rarely. However, if the provider's certificate // does change, any attempt to assume an IAM role that specifies the OIDC provider // as a principal fails until the certificate thumbprint is updated. // -// Trust for the OIDC provider is derived from the provider's certificate and +// Amazon Web Services secures communication with some OIDC identity providers +// (IdPs) through our library of trusted certificate authorities (CAs) instead +// of using a certificate thumbprint to verify your IdP server certificate. +// These OIDC IdPs include Google, and those that use an Amazon S3 bucket to +// host a JSON Web Key Set (JWKS) endpoint. In these cases, your legacy thumbprint +// remains in your configuration, but is no longer used for validation. +// +// Trust for the OIDC provider is derived from the provider certificate and // is validated by the thumbprint. Therefore, it is best to limit access to // the UpdateOpenIDConnectProviderThumbprint operation to highly privileged // users. diff --git a/service/kendra/api.go b/service/kendra/api.go index e292606ed6..3265fe9eda 100644 --- a/service/kendra/api.go +++ b/service/kendra/api.go @@ -3439,7 +3439,7 @@ func (c *Kendra) SubmitFeedbackRequest(input *SubmitFeedbackInput) (req *request // SubmitFeedback API operation for AWSKendraFrontendService. // // Enables you to provide feedback to Amazon Kendra to improve the performance -// of the service. +// of your index. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4351,6 +4351,10 @@ func (s *AdditionalResultAttributeValue) SetTextWithHighlightsValue(v *TextWithH // // If you use more than 2 layers, you receive a ValidationException exception // with the message "AttributeFilter cannot have a depth of more than 2." +// +// If you use more than 10 attribute filters, you receive a ValidationException +// exception with the message "AttributeFilter cannot have a length of more +// than 10". type AttributeFilter struct { _ struct{} `type:"structure"` @@ -5126,21 +5130,23 @@ type CapacityUnitsConfiguration struct { // The amount of extra query capacity for an index and GetQuerySuggestions (https://docs.aws.amazon.com/kendra/latest/dg/API_GetQuerySuggestions.html) // capacity. // - // A single extra capacity unit for an index provides 0.5 queries per second - // or approximately 40,000 queries per day. + // A single extra capacity unit for an index provides 0.1 queries per second + // or approximately 8,000 queries per day. // - // GetQuerySuggestions capacity is 5 times the provisioned query capacity for - // an index. For example, the base capacity for an index is 0.5 queries per - // second, so GetQuerySuggestions capacity is 2.5 calls per second. If adding - // another 0.5 queries per second to total 1 queries per second for an index, - // the GetQuerySuggestions capacity is 5 calls per second. + // GetQuerySuggestions capacity is five times the provisioned query capacity + // for an index, or the base capacity of 2.5 calls per second, whichever is + // higher. For example, the base capacity for an index is 0.1 queries per second, + // and GetQuerySuggestions capacity has a base of 2.5 calls per second. If you + // add another 0.1 queries per second to total 0.2 queries per second for an + // index, the GetQuerySuggestions capacity is 2.5 calls per second (higher than + // five times 0.2 queries per second). 
// // QueryCapacityUnits is a required field QueryCapacityUnits *int64 `type:"integer" required:"true"` // The amount of extra storage capacity for an index. A single capacity unit - // for an index provides 150 GB of storage space or 500,000 documents, whichever - // is reached first. + // provides 30 GB of storage space or 100,000 documents, whichever is reached + // first. // // StorageCapacityUnits is a required field StorageCapacityUnits *int64 `type:"integer" required:"true"` @@ -6199,11 +6205,10 @@ type ConnectionConfiguration struct { // DatabasePort is a required field DatabasePort *int64 `min:"1" type:"integer" required:"true"` - // The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. + // The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. // The credentials should be a user/password pair. For more information, see // Using a Database Data Source (https://docs.aws.amazon.com/kendra/latest/dg/data-source-database.html). - // For more information about AWS Secrets Manager, see What Is AWS Secrets Manager - // (https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) + // For more information about Secrets Manager, see What Is Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) // in the Secrets Manager user guide. // // SecretArn is a required field @@ -6684,6 +6689,9 @@ type CreateIndexInput struct { // // The Edition parameter is optional. If you don't supply a value, the default // is ENTERPRISE_EDITION. + // + // For more information on quota limits for enterprise and developer editions, + // see Quotas (https://docs.aws.amazon.com/kendra/latest/dg/quotas.html). Edition *string `type:"string" enum:"IndexEdition"` // The name for the new index. @@ -7232,6 +7240,10 @@ type DataSourceConfiguration struct { // Provides the configuration information required for Amazon Kendra web crawler. 
WebCrawlerConfiguration *WebCrawlerConfiguration `type:"structure"` + + // Provides the configuration information to connect to WorkDocs as your data + // source. + WorkDocsConfiguration *WorkDocsConfiguration `type:"structure"` } // String returns the string representation @@ -7292,6 +7304,11 @@ func (s *DataSourceConfiguration) Validate() error { invalidParams.AddNested("WebCrawlerConfiguration", err.(request.ErrInvalidParams)) } } + if s.WorkDocsConfiguration != nil { + if err := s.WorkDocsConfiguration.Validate(); err != nil { + invalidParams.AddNested("WorkDocsConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7353,6 +7370,12 @@ func (s *DataSourceConfiguration) SetWebCrawlerConfiguration(v *WebCrawlerConfig return s } +// SetWorkDocsConfiguration sets the WorkDocsConfiguration field's value. +func (s *DataSourceConfiguration) SetWorkDocsConfiguration(v *WorkDocsConfiguration) *DataSourceConfiguration { + s.WorkDocsConfiguration = v + return s +} + // Data source information for user context filtering. type DataSourceGroup struct { _ struct{} `type:"structure"` @@ -15147,12 +15170,11 @@ type SharePointConfiguration struct { // The regex is applied to the display URL of the SharePoint document. InclusionPatterns []*string `type:"list"` - // The Amazon Resource Name (ARN) of credentials stored in AWS Secrets Manager. - // The credentials should be a user/password pair. If you use SharePoint Sever, + // The Amazon Resource Name (ARN) of credentials stored in Secrets Manager. + // The credentials should be a user/password pair. If you use SharePoint Server, // you also need to provide the sever domain name as part of the credentials. // For more information, see Using a Microsoft SharePoint Data Source (https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html). 
- // For more information about AWS Secrets Manager, see What Is AWS Secrets Manager - // (https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) + // For more information about Secrets Manager, see What Is Secrets Manager (https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) // in the Secrets Manager user guide. // // SecretArn is a required field @@ -17507,6 +17529,141 @@ func (s *WebCrawlerConfiguration) SetUrls(v *Urls) *WebCrawlerConfiguration { return s } +// Provides the configuration information to connect to Amazon WorkDocs as your +// data source. +// +// Amazon WorkDocs connector is available in Oregon, North Virginia, Sydney, +// Singapore and Ireland regions. +type WorkDocsConfiguration struct { + _ struct{} `type:"structure"` + + // TRUE to include comments on documents in your index. Including comments in + // your index means each comment is a document that can be searched on. + // + // The default is set to FALSE. + CrawlComments *bool `type:"boolean"` + + // A list of regular expression patterns to exclude certain files in your Amazon + // WorkDocs site repository. Files that match the patterns are excluded from + // the index. Files that don’t match the patterns are included in the index. + // If a file matches both an inclusion pattern and an exclusion pattern, the + // exclusion pattern takes precedence and the file isn’t included in the index. + ExclusionPatterns []*string `type:"list"` + + // A list of DataSourceToIndexFieldMapping objects that map Amazon WorkDocs + // field names to custom index field names in Amazon Kendra. You must first + // create the custom index fields using the UpdateIndex operation before you + // map to Amazon WorkDocs fields. For more information, see Mapping Data Source + // Fields (https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html). + // The Amazon WorkDocs data source field names need to exist in your Amazon + // WorkDocs custom metadata. 
+ FieldMappings []*DataSourceToIndexFieldMapping `min:"1" type:"list"` + + // A list of regular expression patterns to include certain files in your Amazon + // WorkDocs site repository. Files that match the patterns are included in the + // index. Files that don't match the patterns are excluded from the index. If + // a file matches both an inclusion pattern and an exclusion pattern, the exclusion + // pattern takes precedence and the file isn’t included in the index. + InclusionPatterns []*string `type:"list"` + + // The identifier of the directory corresponding to your Amazon WorkDocs site + // repository. + // + // You can find the organization ID in the AWS Directory Service (https://console.aws.amazon.com/directoryservicev2/) + // by going to Active Directory, then Directories. Your Amazon WorkDocs site + // directory has an ID, which is the organization ID. You can also set up a + // new Amazon WorkDocs directory in the AWS Directory Service console and enable + // a Amazon WorkDocs site for the directory in the Amazon WorkDocs console. + // + // OrganizationId is a required field + OrganizationId *string `min:"12" type:"string" required:"true"` + + // TRUE to use the change logs to update documents in your index instead of + // scanning all documents. + // + // If you are syncing your Amazon WorkDocs data source with your index for the + // first time, all documents are scanned. After your first sync, you can use + // the change logs to update your documents in your index for future syncs. + // + // The default is set to FALSE. + UseChangeLog *bool `type:"boolean"` +} + +// String returns the string representation +func (s WorkDocsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkDocsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WorkDocsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkDocsConfiguration"} + if s.FieldMappings != nil && len(s.FieldMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FieldMappings", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 12)) + } + if s.FieldMappings != nil { + for i, v := range s.FieldMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FieldMappings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCrawlComments sets the CrawlComments field's value. +func (s *WorkDocsConfiguration) SetCrawlComments(v bool) *WorkDocsConfiguration { + s.CrawlComments = &v + return s +} + +// SetExclusionPatterns sets the ExclusionPatterns field's value. +func (s *WorkDocsConfiguration) SetExclusionPatterns(v []*string) *WorkDocsConfiguration { + s.ExclusionPatterns = v + return s +} + +// SetFieldMappings sets the FieldMappings field's value. +func (s *WorkDocsConfiguration) SetFieldMappings(v []*DataSourceToIndexFieldMapping) *WorkDocsConfiguration { + s.FieldMappings = v + return s +} + +// SetInclusionPatterns sets the InclusionPatterns field's value. +func (s *WorkDocsConfiguration) SetInclusionPatterns(v []*string) *WorkDocsConfiguration { + s.InclusionPatterns = v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *WorkDocsConfiguration) SetOrganizationId(v string) *WorkDocsConfiguration { + s.OrganizationId = &v + return s +} + +// SetUseChangeLog sets the UseChangeLog field's value. 
+func (s *WorkDocsConfiguration) SetUseChangeLog(v bool) *WorkDocsConfiguration { + s.UseChangeLog = &v + return s +} + const ( // AdditionalResultAttributeValueTypeTextWithHighlightsValue is a AdditionalResultAttributeValueType enum value AdditionalResultAttributeValueTypeTextWithHighlightsValue = "TEXT_WITH_HIGHLIGHTS_VALUE" @@ -17833,6 +17990,9 @@ const ( // DataSourceTypeWebcrawler is a DataSourceType enum value DataSourceTypeWebcrawler = "WEBCRAWLER" + + // DataSourceTypeWorkdocs is a DataSourceType enum value + DataSourceTypeWorkdocs = "WORKDOCS" ) // DataSourceType_Values returns all elements of the DataSourceType enum @@ -17848,6 +18008,7 @@ func DataSourceType_Values() []string { DataSourceTypeConfluence, DataSourceTypeGoogledrive, DataSourceTypeWebcrawler, + DataSourceTypeWorkdocs, } } diff --git a/service/lambda/api.go b/service/lambda/api.go index 398368ab62..ad7ec28eab 100644 --- a/service/lambda/api.go +++ b/service/lambda/api.go @@ -1334,6 +1334,9 @@ func (c *Lambda) DeleteFunctionEventInvokeConfigRequest(input *DeleteFunctionEve // * TooManyRequestsException // The request throughput limit was exceeded. // +// * ResourceConflictException +// The resource already exists, or another operation is in progress. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/DeleteFunctionEventInvokeConfig func (c *Lambda) DeleteFunctionEventInvokeConfig(input *DeleteFunctionEventInvokeConfigInput) (*DeleteFunctionEventInvokeConfigOutput, error) { req, out := c.DeleteFunctionEventInvokeConfigRequest(input) @@ -5130,6 +5133,9 @@ func (c *Lambda) PutFunctionEventInvokeConfigRequest(input *PutFunctionEventInvo // * TooManyRequestsException // The request throughput limit was exceeded. // +// * ResourceConflictException +// The resource already exists, or another operation is in progress. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/PutFunctionEventInvokeConfig func (c *Lambda) PutFunctionEventInvokeConfig(input *PutFunctionEventInvokeConfigInput) (*PutFunctionEventInvokeConfigOutput, error) { req, out := c.PutFunctionEventInvokeConfigRequest(input) @@ -6230,6 +6236,9 @@ func (c *Lambda) UpdateFunctionEventInvokeConfigRequest(input *UpdateFunctionEve // * TooManyRequestsException // The request throughput limit was exceeded. // +// * ResourceConflictException +// The resource already exists, or another operation is in progress. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/lambda-2015-03-31/UpdateFunctionEventInvokeConfig func (c *Lambda) UpdateFunctionEventInvokeConfig(input *UpdateFunctionEventInvokeConfigInput) (*UpdateFunctionEventInvokeConfigOutput, error) { req, out := c.UpdateFunctionEventInvokeConfigRequest(input) @@ -7390,7 +7399,7 @@ type CreateEventSourceMappingInput struct { // The Self-Managed Apache Kafka cluster to send records. SelfManagedEventSource *SelfManagedEventSource `type:"structure"` - // An array of the authentication protocol, or the VPC components to secure + // An array of authentication protocols or VPC components required to secure // your event source. SourceAccessConfigurations []*SourceAccessConfiguration `type:"list"` @@ -9102,8 +9111,8 @@ func (s *EnvironmentResponse) SetVariables(v map[string]*string) *EnvironmentRes return s } -// A mapping between an Amazon Web Services resource and an Lambda function. -// See CreateEventSourceMapping for details. +// A mapping between an Amazon Web Services resource and a Lambda function. +// For details, see CreateEventSourceMapping. type EventSourceMappingConfiguration struct { _ struct{} `type:"structure"` @@ -9128,14 +9137,15 @@ type EventSourceMappingConfiguration struct { // source mapping. FunctionResponseTypes []*string `type:"list"` - // The date that the event source mapping was last updated, or its state changed. 
+ // The date that the event source mapping was last updated or that its state + // changed. LastModified *time.Time `type:"timestamp"` - // The result of the last Lambda invocation of your Lambda function. + // The result of the last Lambda invocation of your function. LastProcessingResult *string `type:"string"` - // (Streams and SQS standard queues) The maximum amount of time to gather records - // before invoking the function, in seconds. The default value is zero. + // (Streams and Amazon SQS standard queues) The maximum amount of time to gather + // records before invoking the function, in seconds. The default value is zero. MaximumBatchingWindowInSeconds *int64 `type:"integer"` // (Streams only) Discard records older than the specified age. The default @@ -9149,23 +9159,23 @@ type EventSourceMappingConfiguration struct { // the record expires in the event source. MaximumRetryAttempts *int64 `type:"integer"` - // (Streams only) The number of batches to process from each shard concurrently. + // (Streams only) The number of batches to process concurrently from each shard. // The default value is 1. ParallelizationFactor *int64 `min:"1" type:"integer"` - // (MQ) The name of the Amazon MQ broker destination queue to consume. + // (Amazon MQ) The name of the Amazon MQ broker destination queue to consume. Queues []*string `min:"1" type:"list"` - // The Self-Managed Apache Kafka cluster for your event source. + // The self-managed Apache Kafka cluster for your event source. SelfManagedEventSource *SelfManagedEventSource `type:"structure"` - // An array of the authentication protocol, or the VPC components to secure - // your event source. + // An array of the authentication protocol, VPC components, or virtual host + // to secure and define your event source. SourceAccessConfigurations []*SourceAccessConfiguration `type:"list"` // The position in a stream from which to start reading. Required for Amazon - // Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. 
AT_TIMESTAMP is - // only supported for Amazon Kinesis streams. + // Kinesis, Amazon DynamoDB, and Amazon MSK stream sources. AT_TIMESTAMP is + // supported only for Amazon Kinesis streams. StartingPosition *string `type:"string" enum:"EventSourcePosition"` // With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. @@ -9175,15 +9185,15 @@ type EventSourceMappingConfiguration struct { // Enabling, Enabled, Disabling, Disabled, Updating, or Deleting. State *string `type:"string"` - // Indicates whether the last change to the event source mapping was made by - // a user, or by the Lambda service. + // Indicates whether a user or Lambda made the last change to the event source + // mapping. StateTransitionReason *string `type:"string"` // The name of the Kafka topic. Topics []*string `min:"1" type:"list"` // (Streams only) The duration in seconds of a processing window. The range - // is between 1 second up to 900 seconds. + // is 1–900 seconds. TumblingWindowInSeconds *int64 `type:"integer"` // The identifier of the event source mapping. @@ -12753,7 +12763,9 @@ type ListEventSourceMappingsInput struct { // A pagination token returned by a previous call. Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // The maximum number of event source mappings to return. + // The maximum number of event source mappings to return. Note that ListEventSourceMappings + // returns a maximum of 100 items in each response, even if you set the number + // higher. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` } @@ -15219,7 +15231,7 @@ func (s *ResourceNotReadyException) RequestID() string { return s.RespMetadata.RequestID } -// The Self-Managed Apache Kafka cluster for your event source. +// The self-managed Apache Kafka cluster for your event source. 
type SelfManagedEventSource struct { _ struct{} `type:"structure"` @@ -15315,31 +15327,35 @@ func (s *ServiceException) RequestID() string { return s.RespMetadata.RequestID } -// You can specify the authentication protocol, or the VPC components to secure -// access to your event source. +// To secure and define access to your event source, you can specify the authentication +// protocol, VPC components, or virtual host. type SourceAccessConfiguration struct { _ struct{} `type:"structure"` - // The type of authentication protocol or the VPC components for your event - // source. For example: "Type":"SASL_SCRAM_512_AUTH". + // The type of authentication protocol, VPC components, or virtual host for + // your event source. For example: "Type":"SASL_SCRAM_512_AUTH". + // + // * BASIC_AUTH - (Amazon MQ) The Secrets Manager secret that stores your + // broker credentials. // - // * BASIC_AUTH - (MQ) The Secrets Manager secret that stores your broker - // credentials. + // * BASIC_AUTH - (Self-managed Apache Kafka) The Secrets Manager ARN of + // your secret key used for SASL/PLAIN authentication of your Apache Kafka + // brokers. // // * VPC_SUBNET - The subnets associated with your VPC. Lambda connects to - // these subnets to fetch data from your Self-Managed Apache Kafka cluster. + // these subnets to fetch data from your self-managed Apache Kafka cluster. // // * VPC_SECURITY_GROUP - The VPC security group used to manage access to - // your Self-Managed Apache Kafka brokers. + // your self-managed Apache Kafka brokers. // // * SASL_SCRAM_256_AUTH - The Secrets Manager ARN of your secret key used - // for SASL SCRAM-256 authentication of your Self-Managed Apache Kafka brokers. + // for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers. // // * SASL_SCRAM_512_AUTH - The Secrets Manager ARN of your secret key used - // for SASL SCRAM-512 authentication of your Self-Managed Apache Kafka brokers. 
+ // for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers. // - // * VIRTUAL_HOST - The name of the virtual host in your RabbitMQ broker. - // Lambda will use this host as the event source. + // * VIRTUAL_HOST - (Amazon MQ) The name of the virtual host in your RabbitMQ + // broker. Lambda uses this RabbitMQ host as the event source. Type *string `type:"string" enum:"SourceAccessType"` // The value for your chosen configuration in Type. For example: "URI": "arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName". @@ -16023,7 +16039,7 @@ type UpdateEventSourceMappingInput struct { // (Streams only) The number of batches to process from each shard concurrently. ParallelizationFactor *int64 `min:"1" type:"integer"` - // An array of the authentication protocol, or the VPC components to secure + // An array of authentication protocols or VPC components required to secure // your event source. SourceAccessConfigurations []*SourceAccessConfiguration `type:"list"` diff --git a/service/personalize/api.go b/service/personalize/api.go index 3246d3f67d..0addaf44ed 100644 --- a/service/personalize/api.go +++ b/service/personalize/api.go @@ -410,9 +410,8 @@ func (c *Personalize) CreateDatasetExportJobRequest(input *CreateDatasetExportJo // // Creates a job that exports data from your dataset to an Amazon S3 bucket. // To allow Amazon Personalize to export the training data, you must specify -// an service-linked AWS Identity and Access Management (IAM) role that gives -// Amazon Personalize PutObject permissions for your Amazon S3 bucket. For information, -// see Exporting a dataset (https://docs.aws.amazon.com/personalize/latest/dg/export-data.html) +// an service-linked IAM role that gives Amazon Personalize PutObject permissions +// for your Amazon S3 bucket. For information, see Exporting a dataset (https://docs.aws.amazon.com/personalize/latest/dg/export-data.html) // in the Amazon Personalize developer guide. 
// // Status @@ -542,8 +541,8 @@ func (c *Personalize) CreateDatasetGroupRequest(input *CreateDatasetGroupInput) // You must wait until the status of the dataset group is ACTIVE before adding // a dataset to the group. // -// You can specify an AWS Key Management Service (KMS) key to encrypt the datasets -// in the group. If you specify a KMS key, you must also include an AWS Identity +// You can specify an Key Management Service (KMS) key to encrypt the datasets +// in the group. If you specify a KMS key, you must also include an Identity // and Access Management (IAM) role that has permission to access the key. // // APIs that require a dataset group ARN in the request @@ -647,11 +646,11 @@ func (c *Personalize) CreateDatasetImportJobRequest(input *CreateDatasetImportJo // // Creates a job that imports training data from your data source (an Amazon // S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize -// to import the training data, you must specify an AWS Identity and Access -// Management (IAM) service role that has permission to read from the data source, -// as Amazon Personalize makes a copy of your data and processes it in an internal -// AWS system. For information on granting access to your Amazon S3 bucket, -// see Giving Amazon Personalize Access to Amazon S3 Resources (https://docs.aws.amazon.com/personalize/latest/dg/granting-personalize-s3-access.html). +// to import the training data, you must specify an IAM service role that has +// permission to read from the data source, as Amazon Personalize makes a copy +// of your data and processes it internally. For information on granting access +// to your Amazon S3 bucket, see Giving Amazon Personalize Access to Amazon +// S3 Resources (https://docs.aws.amazon.com/personalize/latest/dg/granting-personalize-s3-access.html). // // The dataset import job replaces any existing data in the dataset that you // imported in bulk. 
@@ -6194,9 +6193,7 @@ type CreateCampaignInput struct { // Specifies the requested minimum provisioned transactions (recommendations) // per second that Amazon Personalize will support. - // - // MinProvisionedTPS is a required field - MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer" required:"true"` + MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer"` // A name for the new campaign. The campaign name must be unique within your // account. @@ -6223,9 +6220,6 @@ func (s CreateCampaignInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateCampaignInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateCampaignInput"} - if s.MinProvisionedTPS == nil { - invalidParams.Add(request.NewErrParamRequired("MinProvisionedTPS")) - } if s.MinProvisionedTPS != nil && *s.MinProvisionedTPS < 1 { invalidParams.Add(request.NewErrParamMinValue("MinProvisionedTPS", 1)) } @@ -6316,8 +6310,8 @@ type CreateDatasetExportJobInput struct { // JobOutput is a required field JobOutput *DatasetExportJobOutput `locationName:"jobOutput" type:"structure" required:"true"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // service role that has permissions to add data to your output Amazon S3 bucket. + // The Amazon Resource Name (ARN) of the IAM service role that has permissions + // to add data to your output Amazon S3 bucket. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" type:"string" required:"true"` @@ -6419,7 +6413,8 @@ func (s *CreateDatasetExportJobOutput) SetDatasetExportJobArn(v string) *CreateD type CreateDatasetGroupInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of a KMS key used to encrypt the datasets. + // The Amazon Resource Name (ARN) of a Key Management Service (KMS) key used + // to encrypt the datasets. 
KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` // The name for the new dataset group. @@ -6427,8 +6422,9 @@ type CreateDatasetGroupInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The ARN of the IAM role that has permissions to access the KMS key. Supplying - // an IAM role is only valid when also specifying a KMS key. + // The ARN of the Identity and Access Management (IAM) role that has permissions + // to access the Key Management Service (KMS) key. Supplying an IAM role is + // only valid when also specifying a KMS key. RoleArn *string `locationName:"roleArn" type:"string"` } @@ -7383,8 +7379,8 @@ type DatasetExportJob struct { // last updated. LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS Identity and Access Management - // service role that has permissions to add data to your output Amazon S3 bucket. + // The Amazon Resource Name (ARN) of the IAM service role that has permissions + // to add data to your output Amazon S3 bucket. RoleArn *string `locationName:"roleArn" type:"string"` // The status of the dataset export job. @@ -7590,7 +7586,7 @@ func (s *DatasetExportJobSummary) SetStatus(v string) *DatasetExportJobSummary { // The dataset group is used to create and train a solution by calling CreateSolution. // A dataset group can contain only one of each type of dataset. // -// You can specify an AWS Key Management Service (KMS) key to encrypt the datasets +// You can specify an Key Management Service (KMS) key to encrypt the datasets // in the group. type DatasetGroup struct { _ struct{} `type:"structure"` @@ -7604,7 +7600,8 @@ type DatasetGroup struct { // If creating a dataset group fails, provides the reason why. FailureReason *string `locationName:"failureReason" type:"string"` - // The Amazon Resource Name (ARN) of the KMS key used to encrypt the datasets. 
+ // The Amazon Resource Name (ARN) of the Key Management Service (KMS) key used + // to encrypt the datasets. KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` // The last update date and time (in Unix time) of the dataset group. @@ -7791,8 +7788,8 @@ type DatasetImportJob struct { // The date and time (in Unix time) the dataset was last updated. LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` - // The ARN of the AWS Identity and Access Management (IAM) role that has permissions - // to read from the Amazon S3 data source. + // The ARN of the IAM role that has permissions to read from the Amazon S3 data + // source. RoleArn *string `locationName:"roleArn" type:"string"` // The status of the dataset import job. @@ -9574,7 +9571,7 @@ func (s *DescribeSolutionVersionOutput) SetSolutionVersion(v *SolutionVersion) * type EventTracker struct { _ struct{} `type:"structure"` - // The Amazon AWS account that owns the event tracker. + // The Amazon Web Services account that owns the event tracker. AccountId *string `locationName:"accountId" type:"string"` // The date and time (in Unix format) that the event tracker was created. @@ -11901,9 +11898,9 @@ func (s *ResourceNotFoundException) RequestID() string { type S3DataConfig struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Key Management Service (KMS) - // key that Amazon Personalize uses to encrypt or decrypt the input and output - // files of a batch inference job. + // The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that + // Amazon Personalize uses to encrypt or decrypt the input and output files + // of a batch inference job. KmsKeyArn *string `locationName:"kmsKeyArn" type:"string"` // The file path of the Amazon S3 bucket. 
diff --git a/service/proton/api.go b/service/proton/api.go index a1450bcea5..7d0ed89d19 100644 --- a/service/proton/api.go +++ b/service/proton/api.go @@ -63,7 +63,7 @@ func (c *Proton) AcceptEnvironmentAccountConnectionRequest(input *AcceptEnvironm // in the associated environment account. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -161,7 +161,7 @@ func (c *Proton) CancelEnvironmentDeploymentRequest(input *CancelEnvironmentDepl // Attempts to cancel an environment deployment on an UpdateEnvironment action, // if the deployment is IN_PROGRESS. For more information, see Update an environment // (https://docs.aws.amazon.com/proton/latest/adminguide/ag-env-update.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // The following list includes potential cancellation scenarios. // @@ -270,7 +270,7 @@ func (c *Proton) CancelServiceInstanceDeploymentRequest(input *CancelServiceInst // // Attempts to cancel a service instance deployment on an UpdateServiceInstance // action, if the deployment is IN_PROGRESS. For more information, see Update -// a service instance in the AWS Proton Administration guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-instance-update.html) +// a service instance in the AWS Proton Administrator guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-instance-update.html) // or the AWS Proton User guide (https://docs.aws.amazon.com/proton/latest/userguide/ug-svc-instance-update.html). // // The following list includes potential cancellation scenarios. 
@@ -380,7 +380,7 @@ func (c *Proton) CancelServicePipelineDeploymentRequest(input *CancelServicePipe // // Attempts to cancel a service pipeline deployment on an UpdateServicePipeline // action, if the deployment is IN_PROGRESS. For more information, see Update -// a service pipeline in the AWS Proton Administration guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-pipeline-update.html) +// a service pipeline in the AWS Proton Administrator guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-pipeline-update.html) // or the AWS Proton User guide (https://docs.aws.amazon.com/proton/latest/userguide/ug-svc-pipeline-update.html). // // The following list includes potential cancellation scenarios. @@ -491,7 +491,7 @@ func (c *Proton) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *r // Deploy a new environment. An AWS Proton environment is created from an environment // template that defines infrastructure and resources that can be shared across // services. For more information, see the Environments (https://docs.aws.amazon.com/proton/latest/adminguide/ag-environments.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -503,7 +503,7 @@ func (c *Proton) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *r // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. 
// // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -592,13 +592,13 @@ func (c *Proton) CreateEnvironmentAccountConnectionRequest(input *CreateEnvironm // // Create an environment account connection in an environment account so that // environment infrastructure resources can be provisioned in the environment -// account from the management account. +// account from a management account. // // An environment account connection is a secure bi-directional connection between // a management account and an environment account that maintains authorization // and permissions. For more information, see Environment account connections // (proton/latest/adminguide/ag-env-account-connections.html) in the AWS Proton -// Administration guide. +// Administrator guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -610,7 +610,7 @@ func (c *Proton) CreateEnvironmentAccountConnectionRequest(input *CreateEnvironm // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -695,8 +695,8 @@ func (c *Proton) CreateEnvironmentTemplateRequest(input *CreateEnvironmentTempla // CreateEnvironmentTemplate API operation for AWS Proton. // // Create an environment template for AWS Proton. For more information, see -// Environment Templates (https://docs.aws.amazon.com/proton/latest/adminguide/ag-env-templates.html) -// in the AWS Proton Administration Guide. 
+// Environment Templates (https://docs.aws.amazon.com/proton/latest/adminguide/ag-templates.html) +// in the AWS Proton Administrator Guide. // // You can create an environment template in one of the two following ways: // @@ -709,8 +709,8 @@ func (c *Proton) CreateEnvironmentTemplateRequest(input *CreateEnvironmentTempla // create an environment template for customer provisioned and managed infrastructure, // include the provisioning parameter and set the value to CUSTOMER_MANAGED. // For more information, see Register and publish an environment template -// (https://docs.aws.amazon.com/proton/latest/adminguide/env-template-v1.html) -// in the AWS Proton Administration Guide. +// (https://docs.aws.amazon.com/proton/latest/adminguide/template-create.html) +// in the AWS Proton Administrator Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -722,7 +722,7 @@ func (c *Proton) CreateEnvironmentTemplateRequest(input *CreateEnvironmentTempla // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -821,7 +821,7 @@ func (c *Proton) CreateEnvironmentTemplateVersionRequest(input *CreateEnvironmen // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. 
// // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -911,7 +911,7 @@ func (c *Proton) CreateServiceRequest(input *CreateServiceInput) (req *request.R // Create an AWS Proton service. An AWS Proton service is an instantiation of // a service template and often includes several service instances and pipeline. // For more information, see Services (https://docs.aws.amazon.com/proton/latest/adminguide/ag-services.html) -// in the AWS Proton Administration Guide and Services (https://docs.aws.amazon.com/proton/latest/userguide/ug-service.html) +// in the AWS Proton Administrator Guide and Services (https://docs.aws.amazon.com/proton/latest/userguide/ug-service.html) // in the AWS Proton User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -924,7 +924,7 @@ func (c *Proton) CreateServiceRequest(input *CreateServiceInput) (req *request.R // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -1018,7 +1018,7 @@ func (c *Proton) CreateServiceTemplateRequest(input *CreateServiceTemplateInput) // a link to their source code repository. AWS Proton then deploys and manages // the infrastructure defined by the selected service template. For more information, // see Service Templates (https://docs.aws.amazon.com/proton/latest/adminguide/managing-svc-templates.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1030,7 +1030,7 @@ func (c *Proton) CreateServiceTemplateRequest(input *CreateServiceTemplateInput) // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -1129,7 +1129,7 @@ func (c *Proton) CreateServiceTemplateVersionRequest(input *CreateServiceTemplat // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -1320,7 +1320,7 @@ func (c *Proton) DeleteEnvironmentAccountConnectionRequest(input *DeleteEnvironm // up provisioned resources that remain without an environment connection. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2098,7 +2098,7 @@ func (c *Proton) GetEnvironmentAccountConnectionRequest(input *GetEnvironmentAcc // connection. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2752,7 +2752,7 @@ func (c *Proton) ListEnvironmentAccountConnectionsRequest(input *ListEnvironment // View a list of environment account connections. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3941,7 +3941,7 @@ func (c *Proton) ListTagsForResourceRequest(input *ListTagsForResourceInput) (re // ListTagsForResource API operation for AWS Proton. // // List tags for a resource. For more information, see AWS Proton resources -// and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) +// and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4096,7 +4096,7 @@ func (c *Proton) RejectEnvironmentAccountConnectionRequest(input *RejectEnvironm // an environment. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4193,7 +4193,7 @@ func (c *Proton) TagResourceRequest(input *TagResourceInput) (req *request.Reque // TagResource API operation for AWS Proton. // // Tag a resource. For more information, see AWS Proton resources and tagging -// in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) +// in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4291,7 +4291,7 @@ func (c *Proton) UntagResourceRequest(input *UntagResourceInput) (req *request.R // UntagResource API operation for AWS Proton. // // Remove a tag from a resource. For more information, see AWS Proton resources -// and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) +// and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4487,7 +4487,7 @@ func (c *Proton) UpdateEnvironmentRequest(input *UpdateEnvironmentInput) (req *r // // You can only update to a new environment account connection if it was created // in the same environment account that the current environment account connection -// was created in and associated with the current environment. +// was created in and is associated with the current environment. 
// // If the environment isn't associated with an environment account connection, // don't update or include the environmentAccountConnectionId parameter to update @@ -4621,7 +4621,7 @@ func (c *Proton) UpdateEnvironmentAccountConnectionRequest(input *UpdateEnvironm // a new IAM role. // // For more information, see Environment account connections (proton/latest/adminguide/ag-env-account-connections.html) -// in the AWS Proton Administration guide. +// in the AWS Proton Administrator guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4925,7 +4925,7 @@ func (c *Proton) UpdateServiceRequest(input *UpdateServiceInput) (req *request.R // Returned Error Types: // * ServiceQuotaExceededException // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. // // * ValidationException // The input is invalid or an out-of-range value was supplied for the input @@ -6040,7 +6040,7 @@ type CreateEnvironmentInput struct { // must include either the environmentAccountConnectionId or protonServiceRoleArn // parameter and value. For more information, see Environment account connections // (proton/latest/adminguide/ag-env-account-connections.html) in the AWS Proton - // Administration guide. + // Administrator guide. EnvironmentAccountConnectionId *string `locationName:"environmentAccountConnectionId" type:"string"` // The name of the environment. @@ -6056,14 +6056,14 @@ type CreateEnvironmentInput struct { // A link to a YAML formatted spec file that provides inputs as defined in the // environment template bundle schema file. 
For more information, see Environments - // (https://docs.aws.amazon.com/proton/latest/adminguide/ag-managing-environments.html) - // in the AWS Proton Administration Guide. + // (https://docs.aws.amazon.com/proton/latest/adminguide/ag-environments.html) + // in the AWS Proton Administrator Guide. // // Spec is a required field Spec *string `locationName:"spec" min:"1" type:"string" required:"true" sensitive:"true"` // Create tags for your environment. For more information, see AWS Proton resources - // and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) + // and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). Tags []*Tag `locationName:"tags" type:"list"` @@ -6076,8 +6076,8 @@ type CreateEnvironmentInput struct { TemplateMinorVersion *string `locationName:"templateMinorVersion" min:"1" type:"string"` // The name of the environment template. For more information, see Environment - // Templates (https://docs.aws.amazon.com/proton/latest/adminguide/ag-env-templates.html) - // in the AWS Proton Administration Guide. + // Templates (https://docs.aws.amazon.com/proton/latest/adminguide/ag-templates.html) + // in the AWS Proton Administrator Guide. // // TemplateName is a required field TemplateName *string `locationName:"templateName" min:"1" type:"string" required:"true"` @@ -6244,7 +6244,7 @@ type CreateEnvironmentTemplateInput struct { Provisioning *string `locationName:"provisioning" type:"string" enum:"Provisioning"` // Create tags for your environment template. 
For more information, see AWS - // Proton resources and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) + // Proton resources and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). Tags []*Tag `locationName:"tags" type:"list"` } @@ -6508,9 +6508,9 @@ type CreateServiceInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The ARN of the repository connection. For more information, see Set up repository - // connection (https://docs.aws.amazon.com/proton/latest/adminguide/setting-up-for-service.html#settingSS-up-vcontrol) - // in the AWS Proton Administration Guide and Getting started (https://docs.aws.amazon.com/proton/latest/userguide/ug-getting-started.html#getting-started-step1) + // The Amazon Resource Name (ARN) of the repository connection. For more information, + // see Set up repository connection (https://docs.aws.amazon.com/proton/latest/adminguide/setting-up-for-service.html#setting-up-vcontrol) + // in the AWS Proton Administrator Guide and Setting up with AWS Proton (https://docs.aws.amazon.com/proton/latest/userguide/proton-setup.html#setup-repo-connection) // in the AWS Proton User Guide. Don't include this parameter if your service // template doesn't include a service pipeline. RepositoryConnectionArn *string `locationName:"repositoryConnectionArn" min:"1" type:"string"` @@ -6523,14 +6523,14 @@ type CreateServiceInput struct { // bundle schema file. The spec file is in YAML format. Don’t include pipeline // inputs in the spec if your service template doesn’t include a service pipeline. 
// For more information, see Create a service (https://docs.aws.amazon.com/proton/latest/adminguide/ag-create-svc.html.html) - // in the AWS Proton Administration Guide and Create a service (https://docs.aws.amazon.com/proton/latest/userguide/ug-svc-create.html) + // in the AWS Proton Administrator Guide and Create a service (https://docs.aws.amazon.com/proton/latest/userguide/ug-svc-create.html) // in the AWS Proton User Guide. // // Spec is a required field Spec *string `locationName:"spec" min:"1" type:"string" required:"true" sensitive:"true"` // Create tags for your service. For more information, see AWS Proton resources - // and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) + // and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). Tags []*Tag `locationName:"tags" type:"list"` @@ -6721,12 +6721,12 @@ type CreateServiceTemplateInput struct { // AWS Proton includes a service pipeline for your service by default. When // included, this parameter indicates that an AWS Proton service pipeline won't // be included for your service. Once specified, this parameter can't be changed. - // For more information, see Service template bundles (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-template-bundles.html) - // in the AWS Proton Administration Guide. + // For more information, see Service template bundles (https://docs.aws.amazon.com/proton/latest/adminguide/ag-template-bundles.html) + // in the AWS Proton Administrator Guide. PipelineProvisioning *string `locationName:"pipelineProvisioning" type:"string" enum:"Provisioning"` // Create tags for your service template. 
For more information, see AWS Proton - // resources and tagging in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) + // resources and tagging in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/resources.html) // or AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/resources.html). Tags []*Tag `locationName:"tags" type:"list"` } @@ -7564,8 +7564,8 @@ type Environment struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The ARN of the AWS Proton service role that allows AWS Proton to make calls - // to other services on your behalf. + // The Amazon Resource Name (ARN) of the AWS Proton service role that allows + // AWS Proton to make calls to other services on your behalf. ProtonServiceRoleArn *string `locationName:"protonServiceRoleArn" min:"1" type:"string"` // When included, indicates that the environment template is for customer provisioned @@ -7585,7 +7585,7 @@ type Environment struct { // TemplateMinorVersion is a required field TemplateMinorVersion *string `locationName:"templateMinorVersion" min:"1" type:"string" required:"true"` - // The ARN of the environment template. + // The Amazon Resource Name (ARN) of the environment template. // // TemplateName is a required field TemplateName *string `locationName:"templateName" min:"1" type:"string" required:"true"` @@ -7978,8 +7978,8 @@ type EnvironmentSummary struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The ARN of the AWS Proton service role that allows AWS Proton to make calls - // to other services on your behalf. + // The Amazon Resource Name (ARN) of the AWS Proton service role that allows + // AWS Proton to make calls to other services on your behalf. 
ProtonServiceRoleArn *string `locationName:"protonServiceRoleArn" min:"1" type:"string"` // When included, indicates that the environment template is for customer provisioned @@ -10112,7 +10112,7 @@ type ListTagsForResourceInput struct { // resource tags, after the list of resource tags that was previously requested. NextToken *string `locationName:"nextToken" type:"string"` - // The ARN of the resource for the listed tags. + // The Amazon Resource Name (ARN) of the resource for the listed tags. // // ResourceArn is a required field ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` @@ -10412,9 +10412,9 @@ type Service struct { // The service pipeline detail data. Pipeline *ServicePipeline `locationName:"pipeline" type:"structure"` - // The ARN of the repository connection. For more information, see Set up a - // repository connection (https://docs.aws.amazon.com/proton/latest/adminguide/setting-up-for-service.html#setting-up-vcontrol) - // in the AWS Proton Administration Guide and Getting started (https://docs.aws.amazon.com/proton/latest/userguide/ug-getting-started.html#getting-started-step1) + // The Amazon Resource Name (ARN) of the repository connection. For more information, + // see Set up a repository connection (https://docs.aws.amazon.com/proton/latest/adminguide/setting-up-for-service.html#setting-up-vcontrol) + // in the AWS Proton Administrator Guide and Setting up with AWS Proton (https://docs.aws.amazon.com/proton/latest/userguide/proton-setup.html#setup-repo-connection) // in the AWS Proton User Guide. RepositoryConnectionArn *string `locationName:"repositoryConnectionArn" min:"1" type:"string"` @@ -10953,7 +10953,7 @@ func (s *ServicePipeline) SetTemplateName(v string) *ServicePipeline { } // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) -// in the AWS Proton Administration Guide. +// in the AWS Proton Administrator Guide. 
type ServiceQuotaExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -12057,7 +12057,7 @@ type UpdateEnvironmentInput struct { // // You can only update to a new environment account connection if it was created // in the same environment account that the current environment account connection - // was created in and associated with the current environment. + // was created in and is associated with the current environment. EnvironmentAccountConnectionId *string `locationName:"environmentAccountConnectionId" type:"string"` // The name of the environment to update. @@ -12065,8 +12065,8 @@ type UpdateEnvironmentInput struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The ARN of the AWS Proton service role that allows AWS Proton to make API - // calls to other services your behalf. + // The Amazon Resource Name (ARN) of the AWS Proton service role that allows + // AWS Proton to make API calls to other services your behalf. ProtonServiceRoleArn *string `locationName:"protonServiceRoleArn" min:"1" type:"string"` // The formatted specification that defines the update. @@ -12413,7 +12413,7 @@ type UpdateServiceInput struct { // Lists the service instances to add and the existing service instances to // remain. Omit the existing service instances to delete from the list. Don't // include edits to the existing service instances or pipeline. For more information, - // see Edit a service in the AWS Proton Administration Guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-update.html) + // see Edit a service in the AWS Proton Administrator Guide (https://docs.aws.amazon.com/proton/latest/adminguide/ag-svc-update.html) // or the AWS Proton User Guide (https://docs.aws.amazon.com/proton/latest/userguide/ug-svc-update.html). 
Spec *string `locationName:"spec" min:"1" type:"string" sensitive:"true"` } diff --git a/service/proton/doc.go b/service/proton/doc.go index 2c2e0cc470..273a0bba58 100644 --- a/service/proton/doc.go +++ b/service/proton/doc.go @@ -28,7 +28,7 @@ // on AWS Proton, developers need permissions to the service create, list, update // and delete API operations and the service instance list and update API operations. // -// To learn more about AWS Proton administration, see the AWS Proton Administration +// To learn more about AWS Proton administration, see the AWS Proton Administrator // Guide (https://docs.aws.amazon.com/proton/latest/adminguide/Welcome.html). // // To learn more about deploying serverless and containerized applications on @@ -90,22 +90,45 @@ // // * CreateEnvironmentAccountConnection // -//

Idempotent delete APIs

Given a request action that -// has succeeded:

When you retry the request with an API from this -// group and the resource was deleted, its metadata is returned in the response.

-//

If you retry and the resource doesn't exist, the response is empty.

-//

In both cases, the retry succeeds.

Idempotent delete APIs:

-//
  • DeleteEnvironmentTemplate

  • DeleteEnvironmentTemplateVersion

    -//
  • DeleteServiceTemplate

  • DeleteServiceTemplateVersion

    -//
  • DeleteEnvironmentAccountConnection

Asynchronous -// idempotent delete APIs

Given a request action that has succeeded:

-//

If you retry the request with an API from this group, if the original -// request delete operation status is DELETE_IN_PROGRESS, the -// retry returns the resource detail data in the response without performing -// any further actions.

If the original request delete operation is -// complete, a retry returns an empty response.

Asynchronous idempotent -// delete APIs:

  • DeleteEnvironment

  • DeleteService

    -//
+// Idempotent delete APIs +// +// Given a request action that has succeeded: +// +// When you retry the request with an API from this group and the resource was +// deleted, its metadata is returned in the response. +// +// If you retry and the resource doesn't exist, the response is empty. +// +// In both cases, the retry succeeds. +// +// Idempotent delete APIs: +// +// * DeleteEnvironmentTemplate +// +// * DeleteEnvironmentTemplateVersion +// +// * DeleteServiceTemplate +// +// * DeleteServiceTemplateVersion +// +// * DeleteEnvironmentAccountConnection +// +// Asynchronous idempotent delete APIs +// +// Given a request action that has succeeded: +// +// If you retry the request with an API from this group, if the original request +// delete operation status is DELETE_IN_PROGRESS, the retry returns the resource +// detail data in the response without performing any further actions. +// +// If the original request delete operation is complete, a retry returns an +// empty response. +// +// Asynchronous idempotent delete APIs: +// +// * DeleteEnvironment +// +// * DeleteService // // See https://docs.aws.amazon.com/goto/WebAPI/proton-2020-07-20 for more information on this service. // diff --git a/service/proton/errors.go b/service/proton/errors.go index 9404ca87bd..dafc35bf87 100644 --- a/service/proton/errors.go +++ b/service/proton/errors.go @@ -36,7 +36,7 @@ const ( // "ServiceQuotaExceededException". // // A quota was exceeded. For more information, see AWS Proton Quotas (https://docs.aws.amazon.com/proton/latest/adminguide/ag-limits.html) - // in the AWS Proton Administration Guide. + // in the AWS Proton Administrator Guide. 
ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" // ErrCodeThrottlingException for service response error code diff --git a/service/rds/api.go b/service/rds/api.go index fa81b26cc6..759545d6ad 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -25097,6 +25097,10 @@ type DBSnapshot struct { // Provides the option group name for the DB snapshot. OptionGroupName *string `type:"string"` + // Specifies the time of the CreateDBSnapshot operation in Coordinated Universal + // Time (UTC). Doesn't change when the snapshot is copied. + OriginalSnapshotCreateTime *time.Time `type:"timestamp"` + // The percentage of the estimated data that has been transferred. PercentProgress *int64 `type:"integer"` @@ -25109,13 +25113,15 @@ type DBSnapshot struct { ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` // Specifies when the snapshot was taken in Coordinated Universal Time (UTC). + // Changes for the copy when the snapshot is copied. SnapshotCreateTime *time.Time `type:"timestamp"` // Provides the type of the DB snapshot. SnapshotType *string `type:"string"` // The DB snapshot Amazon Resource Name (ARN) that the DB snapshot was copied - // from. It only has value in case of cross-customer or cross-region copy. + // from. It only has a value in the case of a cross-account or cross-Region + // copy. SourceDBSnapshotIdentifier *string `type:"string"` // The Amazon Web Services Region that the DB snapshot was created in or copied @@ -25250,6 +25256,12 @@ func (s *DBSnapshot) SetOptionGroupName(v string) *DBSnapshot { return s } +// SetOriginalSnapshotCreateTime sets the OriginalSnapshotCreateTime field's value. +func (s *DBSnapshot) SetOriginalSnapshotCreateTime(v time.Time) *DBSnapshot { + s.OriginalSnapshotCreateTime = &v + return s +} + // SetPercentProgress sets the PercentProgress field's value. 
func (s *DBSnapshot) SetPercentProgress(v int64) *DBSnapshot { s.PercentProgress = &v @@ -27990,9 +28002,19 @@ type DescribeDBClustersInput struct { // // Supported filters: // + // * clone-group-id - Accepts clone group identifiers. The results list will + // only include information about the DB clusters associated with these clone + // groups. + // // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon // Resource Names (ARNs). The results list will only include information // about the DB clusters identified by these ARNs. + // + // * domain - Accepts Active Directory directory IDs. The results list will + // only include information about the DB clusters associated with these domains. + // + // * engine - Accepts engine names. The results list will only include information + // about the DB clusters for these engines. Filters []*Filter `locationNameList:"Filter" type:"list"` // Optional Boolean parameter that specifies whether the output includes information @@ -30834,13 +30856,7 @@ func (s *DescribeExportTasksOutput) SetMarker(v string) *DescribeExportTasksOutp type DescribeGlobalClustersInput struct { _ struct{} `type:"structure"` - // A filter that specifies one or more global DB clusters to describe. - // - // Supported filters: - // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon - // Resource Names (ARNs). The results list will only include information - // about the DB clusters identified by these ARNs. + // This parameter isn't currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` // The user-supplied DB cluster identifier. If this parameter is specified,