From e1a041f6dba268ba201fa8e94b8fd0a317d5a018 Mon Sep 17 00:00:00 2001
From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com>
Date: Wed, 5 Jan 2022 01:48:19 -0800
Subject: [PATCH 001/374] docs(cfnspec): update CloudFormation documentation (#18273)

Co-authored-by: AWS CDK Team
---
 .../spec-source/cfn-docs/cfn-docs.json | 152 +++++++++---------
 1 file changed, 76 insertions(+), 76 deletions(-)

diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
index aca564579db44..8836c7612e0dc 100644
--- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
+++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
@@ -5976,7 +5976,7 @@
      "description": "The `AWS::Chatbot::SlackChannelConfiguration` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates.\n\nThis resource requires some setup to be done in the AWS Chatbot console. To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in [Setting Up AWS Chatbot with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro) in the *AWS Chatbot User Guide* .",
      "properties": {
        "ConfigurationName": "The name of the configuration.",
        "GuardrailPolicies": "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set. Currently, only 1 IAM policy is supported.",
        "IamRoleArn": "The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\nThis is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see [IAM Policies for AWS Chatbot](https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html) .",
        "LoggingLevel": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\nLogging levels include `ERROR` , `INFO` , or `NONE` .",
        "SlackChannelId": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right-click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .",
@@ -7576,8 +7576,8 @@
      "attributes": {},
      "description": "Information about the Amazon S3 bucket that contains the code that will be committed to the new repository. Changes to this property are ignored after initial resource creation.",
      "properties": {
        "Bucket": "The name of the Amazon S3 bucket that contains the ZIP file with the content that will be committed to the new repository. This can be specified using an ARN or the name of the bucket in the AWS account . Changes to this property are ignored after initial resource creation.",
        "Key": "The key to use for accessing the Amazon S3 bucket. Changes to this property are ignored after initial resource creation.",
        "Bucket": "The name of the Amazon S3 bucket that contains the ZIP file with the content that will be committed to the new repository. This can be specified using the name of the bucket in the AWS account .
Changes to this property are ignored after initial resource creation.", + "Key": "The key to use for accessing the Amazon S3 bucket. Changes to this property are ignored after initial resource creation. For more information, see [Creating object key names](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html) and [Uploading objects](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html) in the Amazon S3 User Guide.", "ObjectVersion": "The object version of the ZIP file, if versioning is enabled for the Amazon S3 bucket. Changes to this property are ignored after initial resource creation." } }, @@ -8247,14 +8247,14 @@ "EmailVerificationMessage": "A string representing the email verification message. EmailVerificationMessage is allowed only if [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is DEVELOPER.", "EmailVerificationSubject": "A string representing the email verification subject. EmailVerificationSubject is allowed only if [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is DEVELOPER.", "EnabledMfas": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \u201cOFF\u201d and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \u201cOFF\u201d. Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`", - "LambdaConfig": "The Lambda trigger configuration information for the new user pool.\n\n> In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you will need to make an extra call to add permission for these event sources to invoke your Lambda function.\n> \n> For more information on using the Lambda API to add permission, see [AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html) .\n> \n> For adding permission using the AWS CLI , see [add-permission](https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html) .", - "MfaConfiguration": "The multi-factor (MFA) configuration. Valid values include:\n\n- `OFF` MFA will not be used for any users.\n- `ON` MFA is required for all users to sign in.\n- `OPTIONAL` MFA will be required only for individual users who have an MFA factor enabled.", + "LambdaConfig": "The Lambda trigger configuration information for the new user pool.\n\n> In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you must make an extra call to add permission for these event sources to invoke your Lambda function.\n> \n> For more information on using the Lambda API to add permission, see [AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html) .\n> \n> For adding permission using the AWS CLI , see [add-permission](https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html) .", + "MfaConfiguration": "The multi-factor (MFA) configuration. 
Valid values include:\n\n- `OFF` MFA won't be used for any users.\n- `ON` MFA is required for all users to sign in.\n- `OPTIONAL` MFA will be required only for individual users who have an MFA factor activated.",
        "Policies": "The policy associated with a user pool.",
        "Schema": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n> During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute.",
        "SmsAuthenticationMessage": "A string representing the SMS authentication message.",
        "SmsConfiguration": "The SMS configuration.",
        "SmsVerificationMessage": "A string representing the SMS verification message.",
        "UserPoolAddOns": "Used to enable advanced security risk detection. Set the key `AdvancedSecurityMode` to the value \"AUDIT\".",
        "UserPoolAddOns": "Enables advanced security risk detection. Set the key `AdvancedSecurityMode` to the value \"AUDIT\".",
        "UserPoolName": "A string used to name the user pool.",
        "UserPoolTags": "The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria.",
        "UsernameAttributes": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated.",
@@ -8275,7 +8275,7 @@
      "properties": {
        "AllowAdminCreateUserOnly": "Set to `True` if only the administrator is allowed to create user profiles. Set to `False` if users can sign themselves up via an app.",
        "InviteMessageTemplate": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .",
        "UnusedAccountValidityDays": "The user account expiration limit, in days, after which the account is no longer usable. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `\"RESEND\"` for the `MessageAction` parameter. The default value for this parameter is 7.\n\n> If you set a value for `TemporaryPasswordValidityDays` in `PasswordPolicy` , that value will be used, and `UnusedAccountValidityDays` will no longer be an available parameter for that user pool."
      }
    },
    "AWS::Cognito::UserPool.CustomEmailSender": {
    },
    "AWS::Cognito::UserPool.DeviceConfiguration": {
      "attributes": {},
      "description": "The configuration for the user pool's device tracking.",
      "description": "The device tracking configuration for a user pool.
A user pool with device tracking deactivated returns a null value.\n\n> When you provide values for any DeviceConfiguration field, you activate device tracking.", "properties": { - "ChallengeRequiredOnNewDevice": "Indicates whether a challenge is required on a new device. Only applicable to a new device.", - "DeviceOnlyRememberedOnUserPrompt": "If true, a device is only remembered on user prompt." + "ChallengeRequiredOnNewDevice": "When true, device authentication can replace SMS and time-based one-time password (TOTP) factors for multi-factor authentication (MFA).\n\n> Users that sign in with devices that have not been confirmed or remembered will still have to provide a second factor, whether or not ChallengeRequiredOnNewDevice is true, when your user pool requires MFA.", + "DeviceOnlyRememberedOnUserPrompt": "When true, users can opt in to remembering their device. Your app code must use callback functions to return the user's choice." } }, "AWS::Cognito::UserPool.EmailConfiguration": { @@ -8307,10 +8307,10 @@ "description": "The email configuration.", "properties": { "ConfigurationSet": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing \u2013 Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management \u2013 When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets.", - "EmailSendingAccount": "Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon SES email configuration. Specify one of the following values:\n\n- **COGNITO_DEFAULT** - When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is below the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.\n\nTo look up the email delivery limit for the default option, see [Limits in Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/limits.html) in the *Amazon Cognito Developer Guide* .\n\nThe default FROM address is no-reply@verificationemail.com. To customize the FROM address, provide the ARN of an Amazon SES verified email address for the `SourceArn` parameter.\n\nIf EmailSendingAccount is COGNITO_DEFAULT, the following parameters aren't allowed:\n\n- EmailVerificationMessage\n- EmailVerificationSubject\n- InviteMessageTemplate.EmailMessage\n- InviteMessageTemplate.EmailSubject\n- VerificationMessageTemplate.EmailMessage\n- VerificationMessageTemplate.EmailMessageByLink\n- VerificationMessageTemplate.EmailSubject,\n- VerificationMessageTemplate.EmailSubjectByLink\n\n> DEVELOPER EmailSendingAccount is required.\n- **DEVELOPER** - When Amazon Cognito emails your users, it uses your Amazon SES configuration. 
Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account .\n\nIf you use this option, you must provide the ARN of an Amazon SES verified email address for the `SourceArn` parameter.\n\nBefore Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a *service-linked role* , which is a type of IAM role, in your AWS account . This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address. For more information about the service-linked role that Amazon Cognito creates, see [Using Service-Linked Roles for Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/using-service-linked-roles.html) in the *Amazon Cognito Developer Guide* .",
        "EmailSendingAccount": "Specifies whether Amazon Cognito emails your users by using its built-in email functionality or your Amazon Simple Email Service email configuration. Specify one of the following values:\n\n- **COGNITO_DEFAULT** - When Amazon Cognito emails your users, it uses its built-in email functionality. When you use the default option, Amazon Cognito allows only a limited number of emails each day for your user pool. For typical production environments, the default email limit is less than the required delivery volume. To achieve a higher delivery volume, specify DEVELOPER to use your Amazon SES email configuration.\n\nTo look up the email delivery limit for the default option, see [Limits in Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/limits.html) in the *Amazon Cognito Developer Guide* .\n\nThe default FROM address is `no-reply@verificationemail.com` . To customize the FROM address, provide the Amazon Resource Name (ARN) of an Amazon SES verified email address for the `SourceArn` parameter.\n\nIf EmailSendingAccount is COGNITO_DEFAULT, you can't use the following parameters:\n\n- EmailVerificationMessage\n- EmailVerificationSubject\n- InviteMessageTemplate.EmailMessage\n- InviteMessageTemplate.EmailSubject\n- VerificationMessageTemplate.EmailMessage\n- VerificationMessageTemplate.EmailMessageByLink\n- VerificationMessageTemplate.EmailSubject,\n- VerificationMessageTemplate.EmailSubjectByLink\n\n> DEVELOPER EmailSendingAccount is required.\n- **DEVELOPER** - When Amazon Cognito emails your users, it uses your Amazon SES configuration. Amazon Cognito calls Amazon SES on your behalf to send email from your verified email address. When you use this option, the email delivery limits are the same limits that apply to your Amazon SES verified email address in your AWS account .\n\nIf you use this option, you must provide the ARN of an Amazon SES verified email address for the `SourceArn` parameter.\n\nBefore Amazon Cognito can email your users, it requires additional permissions to call Amazon SES on your behalf. When you update your user pool with this option, Amazon Cognito creates a *service-linked role* , which is a type of IAM role, in your AWS account . This role contains the permissions that allow Amazon Cognito to access Amazon SES and send email messages with your address.
For more information about the service-linked role that Amazon Cognito creates, see [Using Service-Linked Roles for Amazon Cognito](https://docs.aws.amazon.com/cognito/latest/developerguide/using-service-linked-roles.html) in the *Amazon Cognito Developer Guide* .", "From": "Identifies either the sender's email address or the sender's name with their email address. For example, `testuser@example.com` or `Test User ` . This address appears before the body of the email.", - "ReplyToEmailAddress": "The destination to which the receiver of the email should reply to.", - "SourceArn": "The Amazon Resource Name (ARN) of a verified email address in Amazon SES. This email address is used in one of the following ways, depending on the value that you specify for the `EmailSendingAccount` parameter:\n\n- If you specify `COGNITO_DEFAULT` , Amazon Cognito uses this address as the custom FROM address when it emails your users by using its built-in email account.\n- If you specify `DEVELOPER` , Amazon Cognito emails your users with this address by calling Amazon SES on your behalf." + "ReplyToEmailAddress": "The destination to which the receiver of the email should reply.", + "SourceArn": "The ARN of a verified email address in Amazon SES. Amazon Cognito uses this email address in one of the following ways, depending on the value that you specify for the `EmailSendingAccount` parameter:\n\n- If you specify `COGNITO_DEFAULT` , Amazon Cognito uses this address as the custom FROM address when it emails your users using its built-in email account.\n- If you specify `DEVELOPER` , Amazon Cognito emails your users with this address by calling Amazon SES on your behalf." } }, "AWS::Cognito::UserPool.InviteMessageTemplate": { @@ -8343,7 +8343,7 @@ }, "AWS::Cognito::UserPool.NumberAttributeConstraints": { "attributes": {}, - "description": "The minimum and maximum value of an attribute that is of the number data type.", + "description": "The minimum and maximum values of an attribute that is of the number data type.", "properties": { "MaxValue": "The maximum value of an attribute that is of the number data type.", "MinValue": "The minimum value of an attribute that is of the number data type." @@ -8353,12 +8353,12 @@ "attributes": {}, "description": "The password policy type.", "properties": { - "MinimumLength": "The minimum length of the password policy that you have set. Cannot be less than 6.", + "MinimumLength": "The minimum length of the password in the policy that you have set. This value can't be less than 6.", "RequireLowercase": "In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.", "RequireNumbers": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.", "RequireSymbols": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.", "RequireUppercase": "In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.", - "TemporaryPasswordValidityDays": "In the password policy you have set, refers to the number of days a temporary password is valid. 
If the user does not sign-in during this time, their password will need to be reset by an administrator.\n\n> When you set `TemporaryPasswordValidityDays` for a user pool, you will no longer be able to set the deprecated `UnusedAccountValidityDays` value for that user pool." + "TemporaryPasswordValidityDays": "The number of days a temporary password is valid in the password policy. If the user doesn't sign in during this time, an administrator must reset their password.\n\n> When you set `TemporaryPasswordValidityDays` for a user pool, you can no longer set the deprecated `UnusedAccountValidityDays` value for that user pool." } }, "AWS::Cognito::UserPool.Policies": { @@ -8382,10 +8382,10 @@ "properties": { "AttributeDataType": "The attribute data type.", "DeveloperOnlyAttribute": "> We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token.", - "Mutable": "Specifies whether the value of the attribute can be changed.\n\nFor any user pool attribute that's mapped to an identity provider attribute, you must set this parameter to `true` . Amazon Cognito updates mapped attributes when users sign in to your application through an identity provider. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see [Specifying Identity Provider Attribute Mappings for Your User Pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) .", + "Mutable": "Specifies whether the value of the attribute can be changed.\n\nFor any user pool attribute that is mapped to an identity provider attribute, you must set this parameter to `true` . Amazon Cognito updates mapped attributes when users sign in to your application through an identity provider. If an attribute is immutable, Amazon Cognito throws an error when it attempts to update the attribute. For more information, see [Specifying Identity Provider Attribute Mappings for Your User Pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) .", "Name": "A schema attribute of the name type.", "NumberAttributeConstraints": "Specifies the constraints for an attribute of the number type.", - "Required": "Specifies whether a user pool attribute is required. If the attribute is required and the user does not provide a value, registration or sign-in will fail.", + "Required": "Specifies whether a user pool attribute is required. If the attribute is required and the user doesn't provide a value, registration or sign-in will fail.", "StringAttributeConstraints": "Specifies the constraints for an attribute of the string type." } }, @@ -8394,7 +8394,7 @@ "description": "The SMS configuration type that includes the settings the Cognito User Pool needs to call for the Amazon SNS service to send an SMS message from your AWS account . The Cognito User Pool makes the request to the Amazon SNS Service by using an IAM role that you provide for your AWS account .", "properties": { "ExternalId": "The external ID is a value. 
We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your role's trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` .",
        "SnsCallerArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) caller. This is the ARN of the IAM role in your AWS account which Cognito will use to send SMS messages. SMS messages are subject to a [spending limit](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html) ."
        "SnsCallerArn": "The Amazon Resource Name (ARN) of the Amazon SNS caller. This is the ARN of the IAM role in your AWS account that Amazon Cognito will use to send SMS messages. SMS messages are subject to a [spending limit](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-email-phone-verification.html) ."
      }
    },
    "AWS::Cognito::UserPool.StringAttributeConstraints": {
    "AWS::Cognito::UserPool.UsernameConfiguration": {
      "attributes": {},
      "description": "The `UsernameConfiguration` property type specifies case sensitivity on the username input for the selected sign-in option.",
      "properties": {
        "CaseSensitive": "Specifies whether username case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs.\n\nValid values include:\n\n- *`True`* : Enables case sensitivity for all username input. When this option is set to `True` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- *`False`* : Enables case insensitivity for all username input. For example, when this option is set to `False` , users can sign in using either \"username\" or \"Username\". This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute."
      }
    },
    "AWS::Cognito::UserPool.VerificationMessageTemplate": {
      "properties": {
        "AccessTokenValidity": "The time limit, after which the access token is no longer valid and cannot be used.",
        "AllowedOAuthFlows": "The allowed OAuth flows.\n\nSet to `code` to initiate a code grant flow, which provides an authorization code as the response.
This code can be exchanged for access tokens with the token endpoint.\n\nSet to `implicit` to specify that the client should get the access token (and, optionally, ID token, based on scopes) directly.\n\nSet to `client_credentials` to specify that the client should get the access token (and, optionally, ID token, based on scopes) from the token endpoint using a combination of client and client_secret.",
        "AllowedOAuthFlowsUserPoolClient": "Set to true if the client is allowed to follow the OAuth protocol when interacting with Amazon Cognito user pools.",
        "AllowedOAuthScopes": "The allowed OAuth scopes. Possible values provided by OAuth are: `phone` , `email` , `openid` , and `profile` . Possible values provided by AWS are: `aws.cognito.signin.user.admin` . Custom scopes created in Resource Servers are also supported.",
        "AnalyticsConfiguration": "The Amazon Pinpoint analytics configuration for collecting metrics for this user pool.\n\n> In AWS Regions where Amazon Pinpoint isn't available, User Pools only supports sending events to Amazon Pinpoint projects in AWS Region us-east-1. In Regions where Amazon Pinpoint is available, User Pools will support sending events to Amazon Pinpoint projects within that same Region.",
        "CallbackURLs": "A list of allowed redirect (callback) URLs for the identity providers.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.",
        "ClientName": "The client name for the user pool client you would like to create.",
        "DefaultRedirectURI": "The default redirect URI. Must be in the `CallbackURLs` list.\n\nA redirect URI must:\n\n- Be an absolute URI.\n- Be registered with the authorization server.\n- Not include a fragment component.\n\nSee [OAuth 2.0 - Redirection Endpoint](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6749#section-3.1.2) .\n\nAmazon Cognito requires HTTPS over HTTP except for http://localhost for testing purposes only.\n\nApp callback URLs such as myapp://example are also supported.",
        "EnableTokenRevocation": "Activates or deactivates token revocation. For more information about revoking tokens, see [RevokeToken](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_RevokeToken.html) .\n\nIf you don't include this parameter, token revocation is automatically activated for the new user pool client.",
        "ExplicitAuthFlows": "The authentication flows that are supported by the user pool clients. Flow names without the `ALLOW_` prefix are deprecated in favor of new names with the `ALLOW_` prefix.
Note that values with `ALLOW_` prefix cannot be used along with values without `ALLOW_` prefix.\n\nValid values include:\n\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, Cognito receives the password in the request instead of using the SRP (Secure Remote Password protocol) protocol to verify passwords.\n- `ALLOW_CUSTOM_AUTH` : Enable Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.",
        "ExplicitAuthFlows": "The authentication flows that are supported by the user pool clients. Flow names without the `ALLOW_` prefix are no longer supported, in favor of new names with the `ALLOW_` prefix. Note that values with the `ALLOW_` prefix must be used only along with other values that include the `ALLOW_` prefix.\n\nValid values include:\n\n- `ALLOW_ADMIN_USER_PASSWORD_AUTH` : Enable admin based user password authentication flow `ADMIN_USER_PASSWORD_AUTH` . This setting replaces the `ADMIN_NO_SRP_AUTH` setting. With this authentication flow, Amazon Cognito receives the password in the request instead of using the Secure Remote Password (SRP) protocol to verify passwords.\n- `ALLOW_CUSTOM_AUTH` : Enable AWS Lambda trigger based authentication.\n- `ALLOW_USER_PASSWORD_AUTH` : Enable user password-based authentication. In this flow, Amazon Cognito receives the password in the request instead of using the SRP protocol to verify passwords.\n- `ALLOW_USER_SRP_AUTH` : Enable SRP-based authentication.\n- `ALLOW_REFRESH_TOKEN_AUTH` : Enable authflow to refresh tokens.",
        "GenerateSecret": "Boolean to specify whether you want to generate a secret for the user pool client being created.",
        "IdTokenValidity": "The time limit, after which the ID token is no longer valid and cannot be used.",
        "LogoutURLs": "A list of allowed logout URLs for the identity providers.",
        "PreventUserExistenceErrors": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination.
When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.",
        "ReadAttributes": "The read attributes.",
        "RefreshTokenValidity": "The time limit, in days, after which the refresh token is no longer valid and can't be used.",
        "SupportedIdentityProviders": "A list of provider names for the identity providers that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `SignInWithApple` , `Google` and `LoginWithAmazon` .",
        "TokenValidityUnits": "The units in which the validity times are represented. Default for RefreshToken is days, and default for ID and access tokens is hours.",
        "UserPoolId": "The user pool ID for the user pool where you want to create a user pool client.",
        "WriteAttributes": "The user pool attributes that the app client can write to.\n\nIf your app client allows users to sign in through an identity provider, this array must include all attributes that are mapped to identity provider attributes. Amazon Cognito updates mapped attributes when users sign in to your application through an identity provider. If your app client lacks write access to a mapped attribute, Amazon Cognito throws an error when it tries to update the attribute. For more information, see [Specifying Identity Provider Attribute Mappings for Your User Pool](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-specifying-attribute-mapping.html) ."
      }
    },
    "AWS::Cognito::UserPoolClient.AnalyticsConfiguration": {
      "attributes": {},
      "description": "The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.\n\n> In Regions where Pinpoint isn't available, User Pools only supports sending events to Amazon Pinpoint projects in us-east-1.
In Regions where Pinpoint is available, User Pools will support sending events to Amazon Pinpoint projects within that same Region.",
      "properties": {
        "ApplicationArn": "",
        "ApplicationId": "The application ID for an Amazon Pinpoint application.",
        "ExternalId": "The external ID.",
        "RoleArn": "The ARN of an IAM role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.",
        "RoleArn": "The ARN of an AWS Identity and Access Management role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.",
        "UserDataShared": "If `UserDataShared` is `true` , Amazon Cognito will include user data in the events it publishes to Amazon Pinpoint analytics."
      }
    },
@@ -8505,8 +8505,8 @@
      "properties": {
        "Description": "A string containing the description of the group.",
        "GroupName": "The name of the group. Must be unique.",
        "Precedence": "A nonnegative integer value that specifies the precedence of this group relative to the other groups that a user can belong to in the user pool. Zero is the highest precedence value. Groups with lower `Precedence` values take precedence over groups with higher or null `Precedence` values. If a user belongs to two or more groups, it is the group with the lowest precedence value whose role ARN will be used in the `cognito:roles` and `cognito:preferred_role` claims in the user's tokens.\n\nTwo groups can have the same `Precedence` value. If this happens, neither group takes precedence over the other. If two groups with the same `Precedence` have the same role ARN, that role is used in the `cognito:preferred_role` claim in tokens for users in each group. If the two groups have different role ARNs, the `cognito:preferred_role` claim is not set in users' tokens.\n\nThe default `Precedence` value is null.",
        "RoleArn": "The role ARN for the group.",
        "Precedence": "A non-negative integer value that specifies the precedence of this group relative to the other groups that a user can belong to in the user pool. Zero is the highest precedence value. Groups with lower `Precedence` values take precedence over groups with higher or null `Precedence` values. If a user belongs to two or more groups, it is the group with the lowest precedence value whose role ARN is given in the user's tokens for the `cognito:roles` and `cognito:preferred_role` claims.\n\nTwo groups can have the same `Precedence` value. If this happens, neither group takes precedence over the other. If two groups with the same `Precedence` have the same role ARN, that role is used in the `cognito:preferred_role` claim in tokens for users in each group. If the two groups have different role ARNs, the `cognito:preferred_role` claim isn't set in users' tokens.\n\nThe default `Precedence` value is null.",
        "RoleArn": "The role Amazon Resource Name (ARN) for the group.",
        "UserPoolId": "The user pool ID for the user pool."
      }
    },
    "AWS::Cognito::UserPoolIdentityProvider": {
      "properties": {
        "AttributeMapping": "A mapping of identity provider attributes to standard and custom user pool attributes.",
        "IdpIdentifiers": "A list of identity provider identifiers.",
        "ProviderDetails": "The identity provider details.
The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OIDC providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- authorize_url *if not available from discovery URL specified by oidc_issuer key*\n- token_url *if not available from discovery URL specified by oidc_issuer key*\n- attributes_url *if not available from discovery URL specified by oidc_issuer key*\n- jwks_uri *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- MetadataFile OR MetadataURL\n- IDPSignout *optional*", + "ProviderDetails": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- authorize_url *if not available from discovery URL specified by oidc_issuer key*\n- token_url *if not available from discovery URL specified by oidc_issuer key*\n- attributes_url *if not available from discovery URL specified by oidc_issuer key*\n- jwks_uri *if not available from discovery URL specified by oidc_issuer key*\n- attributes_url_add_attributes *a read-only property that is set automatically*\n- For SAML providers:\n\n- MetadataFile OR MetadataURL\n- IDPSignout (optional)", "ProviderName": "The identity provider name.", "ProviderType": "The identity provider type.", "UserPoolId": "The user pool ID." @@ -8550,9 +8550,9 @@ }, "description": "The `AWS::Cognito::UserPoolRiskConfigurationAttachment` resource sets the risk configuration that is used for Amazon Cognito advanced security features.\n\nYou can specify risk configuration for a single client (with a specific `clientId` ) or for all clients (by setting the `clientId` to `ALL` ). If you specify `ALL` , the default configuration is used for every client that has had no risk configuration set previously. If you specify risk configuration for a particular client, it no longer falls back to the `ALL` configuration.", "properties": { - "AccountTakeoverRiskConfiguration": "The account takeover risk configuration object including the `NotifyConfiguration` object and `Actions` to take in the case of an account takeover.", + "AccountTakeoverRiskConfiguration": "The account takeover risk configuration object, including the `NotifyConfiguration` object and `Actions` to take if there is an account takeover.", "ClientId": "The app client ID. 
You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).",
        "CompromisedCredentialsRiskConfiguration": "The compromised credentials risk configuration object including the `EventFilter` and the `EventAction`",
        "CompromisedCredentialsRiskConfiguration": "The compromised credentials risk configuration object, including the `EventFilter` and the `EventAction` .",
        "RiskExceptionConfiguration": "The configuration to override the risk decision.",
        "UserPoolId": "The user pool ID."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType": {
      "attributes": {},
      "description": "Account takeover action type.",
      "properties": {
        "EventAction": "The event action.\n\n- `BLOCK` Choosing this action will block the request.\n- `MFA_IF_CONFIGURED` Throw MFA challenge if user has configured it, else allow the request.\n- `MFA_REQUIRED` Throw MFA challenge if user has configured it, else block the request.\n- `NO_ACTION` Allow the user sign-in.",
        "EventAction": "The event action.\n\n- `BLOCK` Choosing this action will block the request.\n- `MFA_IF_CONFIGURED` Present an MFA challenge if user has configured it, else allow the request.\n- `MFA_REQUIRED` Present an MFA challenge if user has configured it, else block the request.\n- `NO_ACTION` Allow the user to sign in.",
        "Notify": "Flag specifying whether to send a notification."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverRiskConfigurationType": {
      "attributes": {},
      "description": "Configuration for mitigation actions and notification for different levels of risk detected for a potential account takeover.",
      "properties": {
        "Actions": "Account takeover risk configuration actions",
        "Actions": "Account takeover risk configuration actions.",
        "NotifyConfiguration": "The notify configuration used to construct email notifications."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.CompromisedCredentialsActionsType": {
      "attributes": {},
      "description": "The compromised credentials actions type",
      "description": "The compromised credentials actions type.",
      "properties": {
        "EventAction": "The event action."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyConfigurationType": {
      "attributes": {},
      "description": "The notify configuration type.",
      "properties": {
        "BlockEmail": "Email template used when a detected risk event is blocked.",
        "From": "The email address that is sending the email. It must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.",
        "From": "The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.",
        "MfaEmail": "The MFA email template used when MFA is challenged as part of a detected risk.",
        "MfaEmail": "The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk.",
        "NoActionEmail": "The email template used when a detected risk event is allowed.",
        "ReplyTo": "The destination to which the receiver of an email should reply.",
        "SourceArn": "The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. It permits Amazon Cognito to send for the email address specified in the `From` parameter."
        "SourceArn": "The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the `From` parameter."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType": {
      "attributes": {},
      "description": "The notify email type.",
      "properties": {
        "HtmlBody": "The HTML body.",
        "Subject": "The subject.",
        "TextBody": "The text body."
        "HtmlBody": "The email HTML body.",
        "Subject": "The email subject.",
        "TextBody": "The email text body."
      }
    },
    "AWS::Cognito::UserPoolRiskConfigurationAttachment.RiskExceptionConfigurationType": {
      "attributes": {},
      "description": "The type of the configuration to override the risk decision.",
      "properties": {
        "BlockedIPRangeList": "Overrides the risk decision to always block the pre-authentication requests. The IP range is in CIDR notation: a compact representation of an IP address and its associated routing prefix.",
        "SkippedIPRangeList": "Risk detection is not performed on the IP addresses in the range list. The IP range is in CIDR notation."
        "BlockedIPRangeList": "Overrides the risk decision to always block the pre-authentication requests. The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.",
        "SkippedIPRangeList": "Risk detection isn't performed on the IP addresses in this range list. The IP range is in CIDR notation."
      }
    },
    "AWS::Cognito::UserPoolUICustomizationAttachment": {
      "description": "The `AWS::Cognito::UserPoolUser` resource creates an Amazon Cognito user pool user.",
      "properties": {
        "ClientMetadata": "A map of custom key-value pairs that you can provide as input for the custom workflow that is invoked by the *pre sign-up* trigger.\n\nYou create custom workflows by assigning AWS Lambda functions to user pool triggers. When you create a `UserPoolUser` resource and include the `ClientMetadata` property, Amazon Cognito invokes the function that is assigned to the *pre sign-up* trigger. When Amazon Cognito invokes this function, it passes a JSON payload, which the function receives as input. This payload contains a `clientMetadata` attribute, which provides the data that you assigned to the ClientMetadata property. In your function code in AWS Lambda , you can process the `clientMetadata` value to enhance your workflow for your specific needs.\n\nFor more information, see [Customizing User Pool Workflows with Lambda Triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) in the *Amazon Cognito Developer Guide* .\n\n> Take the following limitations into consideration when you use the ClientMetadata parameter:\n> \n> - Amazon Cognito does not store the ClientMetadata value. This data is available only to AWS Lambda triggers that are assigned to a user pool to support custom workflows. If your user pool configuration does not include triggers, the ClientMetadata parameter serves no purpose.\n> - Amazon Cognito does not validate the ClientMetadata value.\n> - Amazon Cognito does not encrypt the ClientMetadata value, so don't use it to provide sensitive information.",
        "DesiredDeliveryMediums": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . More than one value can be specified.",
        "ForceAliasCreation": "This parameter is only used if the `phone_number_verified` or `email_verified` attribute is set to `True` .
Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .",
        "MessageAction": "Set to `\"RESEND\"` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `\"SUPPRESS\"` to suppress sending the message. Only one value can be specified.",
        "DesiredDeliveryMediums": "Specify `\"EMAIL\"` if email will be used to send the welcome message. Specify `\"SMS\"` if the phone number will be used. The default value is `\"SMS\"` . You can specify more than one value.",
        "ForceAliasCreation": "This parameter is used only if the `phone_number_verified` or `email_verified` attribute is set to `True` . Otherwise, it is ignored.\n\nIf this parameter is set to `True` and the phone number or email address specified in the UserAttributes parameter already exists as an alias with a different user, the API call will migrate the alias from the previous user to the newly created user. The previous user will no longer be able to log in using that alias.\n\nIf this parameter is set to `False` , the API throws an `AliasExistsException` error if the alias already exists. The default value is `False` .",
        "MessageAction": "Set to `RESEND` to resend the invitation message to a user that already exists and reset the expiration limit on the user's account. Set to `SUPPRESS` to suppress sending the message. You can specify only one value.",
        "UserAttributes": "The user attributes and attribute values to be set for the user to be created. These are name-value pairs. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (in [](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) or in the *Attributes* tab of the console) must be supplied either by you (in your call to `AdminCreateUser` ) or by the user (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. This can be done in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . (You can also do this by calling [](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .)\n\n- *email* : The email address of the user to whom the message that contains the code and user name will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and user name will be sent.
Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "UserPoolId": "The user pool ID for the user pool where the user will be created.", - "Username": "The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username cannot be changed.", - "ValidationData": "The user's validation data. This is an array of name-value pairs that contain user attributes and attribute values that you can use for custom validation, such as restricting the types of user accounts that can be registered. For example, you might choose to allow or disallow user sign-up based on the user's domain.\n\nTo configure custom validation, you must create a Pre Sign-up Lambda trigger for the user pool as described in the Amazon Cognito Developer Guide. The Lambda trigger receives the validation data and uses it in the validation process.\n\nThe user's validation data is not persisted." + "Username": "The username for the user. Must be unique within the user pool. Must be a UTF-8 string between 1 and 128 characters. After the user is created, the username can't be changed.", + "ValidationData": "The user's validation data. This is an array of name-value pairs that contain user attributes and attribute values that you can use for custom validation, such as restricting the types of user accounts that can be registered. For example, you might choose to allow or disallow user sign-up based on the user's domain.\n\nTo configure custom validation, you must create a Pre Sign-up AWS Lambda trigger for the user pool as described in the Amazon Cognito Developer Guide. The Lambda trigger receives the validation data and uses it in the validation process.\n\nThe user's validation data isn't persisted." } }, "AWS::Cognito::UserPoolUser.AttributeType": { @@ -13451,7 +13451,7 @@ "properties": { "AutoScalingGroupArn": "The Amazon Resource Name (ARN) or short name that identifies the Auto Scaling group.", "ManagedScaling": "The managed scaling settings for the Auto Scaling group capacity provider.", - "ManagedTerminationProtection": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in." + "ManagedTerminationProtection": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection. 
The default is disabled.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions enabled as well. For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in." } }, "AWS::ECS::CapacityProvider.ManagedScaling": { @@ -13729,7 +13729,7 @@ "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .\n\n> This parameter is not supported for Windows containers.", "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). 
For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", - "Memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task `memory` value, if one is specified. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf using the Fargate launch type, this parameter is optional.\n\nIf using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level `memory` and `memoryReservation` value, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nThe Docker daemon reserves a minimum of 4 MiB of memory for a container. Therefore, we recommend that you specify fewer than 4 MiB of memory for your containers.", + "Memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task `memory` value, if one is specified. 
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf using the Fargate launch type, this parameter is optional.\n\nIf using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level `memory` and `memoryReservation` value, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container, so you should not specify fewer than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.", "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker daemon reserves a minimum of 4 MiB of memory for a container. 
Therefore, we recommend that you specify no fewer than 4 MiB of memory for your containers.",
        "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.",
        "Name": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .",
@@ -13927,9 +13927,9 @@
     },
     "AWS::ECS::TaskDefinition.RuntimePlatform": {
       "attributes": {},
-      "description": "Information about the platform for the Amazon ECS service or task.",
+      "description": "Information about the platform for the Amazon ECS service or task.\n\nFor more information about `RuntimePlatform` , see [RuntimePlatform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) in the *Amazon Elastic Container Service Developer Guide* .",
       "properties": {
-        "CpuArchitecture": "The CPU architecture.",
+        "CpuArchitecture": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . This option is available for tasks that run on Linux Amazon EC2 instances or Linux containers on Fargate.",
         "OperatingSystemFamily": "The operating system."
       }
     },
@@ -31904,7 +31904,7 @@
        "AllowMajorVersionUpgrade": "A value that indicates whether major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.\n\nConstraints: Major version upgrades must be allowed when specifying a value for the `EngineVersion` parameter that is a different major version than the DB instance's current version.",
        "AssociatedRoles": "The AWS Identity and Access Management (IAM) roles associated with the DB instance.",
        "AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.",
-       "AvailabilityZone": "The Availability Zone that the database instance will be created in.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's region.\n\nExample: `us-east-1d`\n\nConstraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to `true` . 
The specified Availability Zone must be in the same region as the current endpoint.", + "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\n*Amazon Aurora*\n\nNot applicable. Availability Zones are managed by the DB cluster.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region.\n\nExample: `us-east-1d`\n\nConstraint: The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\n> If you're creating a DB instance in an RDS on VMware environment, specify the identifier of the custom Availability Zone to create the DB instance in.\n> \n> For more information about RDS on VMware, see the [RDS on VMware User Guide.](https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html)", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\n> Specifying or updating this property triggers a reboot. \n\nFor more information about CA certificate identifiers for RDS DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide* .\n\nFor more information about CA certificate identifiers for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .", "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. 
For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", @@ -31976,7 +31976,7 @@ }, "description": "The `AWS::RDS::DBParameterGroup` resource creates a custom parameter group for an RDS database family.\n\nThis type can be declared in a template and referenced in the `DBParameterGroupName` property of an `[AWS::RDS::DBInstance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html)` resource.\n\nFor information about configuring parameters for Amazon RDS DB instances, see [Working with DB parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor information about configuring parameters for Amazon Aurora DB instances, see [Working with DB parameter groups and DB cluster parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> Applying a parameter group to a DB instance may require the DB instance to reboot, resulting in a database outage for the duration of the reboot.", "properties": { - "Description": "Provides the customer-specified description for this DB Parameter Group.", + "Description": "Provides the customer-specified description for this DB parameter group.", "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", "Parameters": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "Tags": "Tags to assign to the DB parameter group." @@ -32081,7 +32081,7 @@ "properties": { "DBSecurityGroupIngress": "Ingress rules to be applied to the DB security group.", "EC2VpcId": "The identifier of an Amazon VPC. 
This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", - "GroupDescription": "Provides the description of the DB Security Group.", + "GroupDescription": "Provides the description of the DB security group.", "Tags": "Tags to assign to the DB security group." } }, @@ -32090,9 +32090,9 @@ "description": "The `Ingress` property type specifies an individual ingress rule within an `AWS::RDS::DBSecurityGroup` resource.", "properties": { "CIDRIP": "The IP range to authorize.", - "EC2SecurityGroupId": "Id of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupName": "Name of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupOwnerId": "AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." + "EC2SecurityGroupId": "Id of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupName": "Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupOwnerId": "AWS account number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS access key ID isn't an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." } }, "AWS::RDS::DBSecurityGroupIngress": { @@ -32102,10 +32102,10 @@ "description": "The `AWS::RDS::DBSecurityGroupIngress` resource enables ingress to a DB security group using one of two forms of authorization. First, you can add EC2 or VPC security groups to the DB security group if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet.\n\nThis type supports updates. For more information about updating stacks, see [AWS CloudFormation Stacks Updates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html) .\n\nFor details about the settings for DB security group ingress, see [AuthorizeDBSecurityGroupIngress](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_AuthorizeDBSecurityGroupIngress.html) .", "properties": { "CIDRIP": "The IP range to authorize.", - "DBSecurityGroupName": "The name of the DB Security Group to add authorization to.", - "EC2SecurityGroupId": "Id of the EC2 Security Group to authorize. 
For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupName": "Name of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupOwnerId": "AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." + "DBSecurityGroupName": "The name of the DB security group to add authorization to.", + "EC2SecurityGroupId": "Id of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupName": "Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupOwnerId": "AWS account number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS access key ID isn't an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." } }, "AWS::RDS::DBSubnetGroup": { @@ -32114,9 +32114,9 @@ }, "description": "The `AWS::RDS::DBSubnetGroup` resource creates a database subnet group. Subnet groups must contain at least two subnets in two different Availability Zones in the same region.\n\nFor more information, see [Working with DB subnet groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html#USER_VPC.Subnets) in the *Amazon RDS User Guide* .", "properties": { - "DBSubnetGroupDescription": "The description for the DB Subnet Group.", + "DBSubnetGroupDescription": "The description for the DB subnet group.", "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", - "SubnetIds": "The EC2 Subnet IDs for the DB Subnet Group.", + "SubnetIds": "The EC2 Subnet IDs for the DB subnet group.", "Tags": "Tags to assign to the DB subnet group." } }, @@ -32126,8 +32126,8 @@ }, "description": "The `AWS::RDS::EventSubscription` resource allows you to receive notifications for Amazon Relational Database Service events through the Amazon Simple Notification Service (Amazon SNS). For more information, see [Using Amazon RDS Event Notification](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) in the *Amazon RDS User Guide* .", "properties": { - "Enabled": "A Boolean value; set to *true* to activate the subscription, set to *false* to create the subscription but not active it.", - "EventCategories": "A list of event categories for a SourceType that you want to subscribe to. 
You can see a list of the categories for a given SourceType in the [Events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) topic in the Amazon RDS User Guide or by using the *DescribeEventCategories* action.", + "Enabled": "A value that indicates whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", + "EventCategories": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in [Events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) in the *Amazon RDS User Guide* or by using the `DescribeEventCategories` operation.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.", "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`" @@ -32695,7 +32695,7 @@ }, "AWS::RoboMaker::Robot": { "attributes": { - "Arn": "", + "Arn": "The Amazon Resource Name (ARN) of the robot.", "Ref": "When you pass the logical ID of an `AWS::RoboMaker::Robot` resource to the intrinsic `Ref` function, the function returns the Amazon Resource Name (ARN) of the robot application, such as `arn:aws:robomaker:us-west-2:123456789012:robot/MyRobot/1544035373264` ." }, "description": "The `AWS::RoboMaker::RobotApplication` resource creates an AWS RoboMaker robot.", @@ -32741,8 +32741,8 @@ }, "AWS::RoboMaker::RobotApplicationVersion": { "attributes": { - "ApplicationVersion": "", - "Arn": "", + "ApplicationVersion": "The robot application version.", + "Arn": "The Amazon Resource Name (ARN) of the robot application version.", "Ref": "When you pass the logical ID of an `AWS::RoboMaker::RobotApplicationVersion` resource to the intrinsic `Ref` function, the function returns the Amazon Resource Name (ARN) of the robot application version, such as `arn:aws:robomaker:us-west-2:123456789012:robot-application/MyRobotApplication/1546541208251` ." 
}, "description": "The `AWS::RoboMaker::RobotApplicationVersion` resource creates an AWS RoboMaker robot version.", @@ -32760,7 +32760,7 @@ "description": "The `AWS::RoboMaker::SimulationApplication` resource creates an AWS RoboMaker simulation application.", "properties": { "CurrentRevisionId": "The current revision id.", - "Environment": "", + "Environment": "The environment of the simulation application.", "Name": "The name of the simulation application.", "RenderingEngine": "The rendering engine for the simulation application.", "RobotSoftwareSuite": "The robot software suite (ROS distribution) used by the simulation application.", @@ -32804,8 +32804,8 @@ }, "AWS::RoboMaker::SimulationApplicationVersion": { "attributes": { - "ApplicationVersion": "", - "Arn": "", + "ApplicationVersion": "The simulation application version.", + "Arn": "The Amazon Resource Name (ARN) of the simulation application version.", "Ref": "When you pass the logical ID of an `AWS::RoboMaker::SimulationApplicationVersion` resource to the intrinsic `Ref` function, the function returns the Amazon Resource Name (ARN) of the simulation application version, such as `arn:aws:robomaker:us-west-2:123456789012:simulation-application/MySimulationApplication/1546541201334` ." }, "description": "The `AWS::RoboMaker::SimulationApplicationVersion` resource creates a version of an AWS RoboMaker simulation application.", @@ -33549,7 +33549,7 @@ }, "AWS::S3::Bucket.EventBridgeConfiguration": { "attributes": {}, - "description": "Amazon S3 can send events to Amazon EventBridge whenever certain events happen in your bucket, see [Using EventBridge](https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html) in the *Amazon S3 User Guide* .\n\nUnlike other destinations, delivery of events to EventBridge can be either enabled or disabled for a bucket. If enabled, all events will be sent to EventBridge and you can use EventBridge rules and filters to route events to additional targets. For more information, see [What Is Amazon EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-what-is.html) in the *Amazon EventBridge User Guide*", + "description": "Amazon S3 can send events to Amazon EventBridge whenever certain events happen in your bucket, see [Using EventBridge](https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html) in the *Amazon S3 User Guide* .\n\nUnlike other destinations, delivery of events to EventBridge can be either enabled or disabled for a bucket. If enabled, all events will be sent to EventBridge and you can use EventBridge rules to route events to additional targets. For more information, see [What Is Amazon EventBridge](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-what-is.html) in the *Amazon EventBridge User Guide*", "properties": { "EventBridgeEnabled": "Enables delivery of events to Amazon EventBridge." } @@ -34683,8 +34683,8 @@ "CutoffBehavior": "The specification for whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.", "Description": "A description of the task.", "LoggingInfo": "Information about an Amazon S3 bucket to write task-level logs to.\n\n> `LoggingInfo` has been deprecated. To specify an Amazon S3 bucket to contain logs, instead use the `OutputS3BucketName` and `OutputS3KeyPrefix` options in the `TaskInvocationParameters` structure. 
For information about how Systems Manager handles these options for the supported maintenance window task types, see [AWS Systems Manager MaintenanceWindowTask TaskInvocationParameters](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtask-taskinvocationparameters.html) .",
-        "MaxConcurrency": "The maximum number of targets this task can be run for, in parallel.",
-        "MaxErrors": "The maximum number of errors allowed before this task stops being scheduled.",
+        "MaxConcurrency": "The maximum number of targets this task can be run for, in parallel.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only when you are registering or updating a [targetless task](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html) . You must provide a value in all other cases.\n> \n> For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of `1` . This value doesn't affect the running of your task.",
+        "MaxErrors": "The maximum number of errors allowed before this task stops being scheduled.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only when you are registering or updating a [targetless task](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html) . You must provide a value in all other cases.\n> \n> For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of `1` . This value doesn't affect the running of your task.",
         "Name": "The task name.",
         "Priority": "The priority of the task in the maintenance window. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.",
         "ServiceRoleArn": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) service role to use to publish Amazon Simple Notification Service (Amazon SNS) notifications for maintenance window Run Command tasks.",

From 76b5c0d12e1e692efcf6a557ee4ddb6df3709e4d Mon Sep 17 00:00:00 2001
From: Philipp Hoefflin
Date: Wed, 5 Jan 2022 11:36:40 +0100
Subject: [PATCH 002/374] fix(route53): support multiple cross account DNS delegations (#17837)

The custom resource lambda function's role is only created once. To support multiple zone delegations, the role creation and policy management need to be decoupled so that each CrossAccountZoneDelegationRecord instance can add an individual policy to the role.
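A minimal sketch of the pattern this enables (zone names and construct IDs are illustrative; `stack` is an assumed existing `Stack`):

```ts
import * as iam from '@aws-cdk/aws-iam';
import * as route53 from '@aws-cdk/aws-route53';
import { Duration } from '@aws-cdk/core';

const parentZone = new route53.PublicHostedZone(stack, 'ParentZone', {
  zoneName: 'myzone.com',
  crossAccountZoneDelegationPrincipal: new iam.AccountPrincipal('123456789012'),
});

// Multiple delegation records can now share the singleton custom resource
// handler: each record attaches its own sts:AssumeRole policy to the
// handler role instead of the policy being baked in at role creation.
for (const sub of ['sub', 'anothersub']) {
  const childZone = new route53.PublicHostedZone(stack, `${sub}Zone`, {
    zoneName: `${sub}.myzone.com`,
  });
  new route53.CrossAccountZoneDelegationRecord(stack, `${sub}Delegation`, {
    delegatedZone: childZone,
    parentHostedZoneName: 'myzone.com',
    delegationRole: parentZone.crossAccountZoneDelegationRole!,
    ttl: Duration.seconds(60),
  });
}
```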
Fixes #17836 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-route53/lib/record-set.ts | 13 +- ...ross-account-zone-delegation.expected.json | 118 +++++++++++++--- .../aws-route53/test/record-set.test.ts | 133 ++++++++++++++++++ 3 files changed, 241 insertions(+), 23 deletions(-) diff --git a/packages/@aws-cdk/aws-route53/lib/record-set.ts b/packages/@aws-cdk/aws-route53/lib/record-set.ts index 6f49f06b5c70c..823257f6fb576 100644 --- a/packages/@aws-cdk/aws-route53/lib/record-set.ts +++ b/packages/@aws-cdk/aws-route53/lib/record-set.ts @@ -683,15 +683,22 @@ export class CrossAccountZoneDelegationRecord extends CoreConstruct { throw Error('Only one of parentHostedZoneName and parentHostedZoneId is supported'); } - const serviceToken = CustomResourceProvider.getOrCreate(this, CROSS_ACCOUNT_ZONE_DELEGATION_RESOURCE_TYPE, { + const provider = CustomResourceProvider.getOrCreateProvider(this, CROSS_ACCOUNT_ZONE_DELEGATION_RESOURCE_TYPE, { codeDirectory: path.join(__dirname, 'cross-account-zone-delegation-handler'), runtime: CustomResourceProviderRuntime.NODEJS_12_X, - policyStatements: [{ Effect: 'Allow', Action: 'sts:AssumeRole', Resource: props.delegationRole.roleArn }], }); + const role = iam.Role.fromRoleArn(this, 'cross-account-zone-delegation-handler-role', provider.roleArn); + + role.addToPrincipalPolicy(new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + actions: ['sts:AssumeRole'], + resources: [props.delegationRole.roleArn], + })); + new CustomResource(this, 'CrossAccountZoneDelegationCustomResource', { resourceType: CROSS_ACCOUNT_ZONE_DELEGATION_RESOURCE_TYPE, - serviceToken, + serviceToken: provider.serviceToken, removalPolicy: props.removalPolicy, properties: { AssumeRoleArn: props.delegationRole.roleArn, diff --git a/packages/@aws-cdk/aws-route53/test/integ.cross-account-zone-delegation.expected.json b/packages/@aws-cdk/aws-route53/test/integ.cross-account-zone-delegation.expected.json index 281fc984d0756..d5890cda1c955 100644 --- a/packages/@aws-cdk/aws-route53/test/integ.cross-account-zone-delegation.expected.json +++ b/packages/@aws-cdk/aws-route53/test/integ.cross-account-zone-delegation.expected.json @@ -78,6 +78,55 @@ "Name": "sub.myzone.com." 
} }, + "DelegationWithZoneIdcrossaccountzonedelegationhandlerrolePolicy5170A69B": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ParentHostedZoneCrossAccountZoneDelegationRole95B1C36E", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "DelegationWithZoneIdcrossaccountzonedelegationhandlerrolePolicy5170A69B", + "Roles": [ + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "/", + { + "Fn::Select": [ + 5, + { + "Fn::Split": [ + ":", + { + "Fn::GetAtt": [ + "CustomCrossAccountZoneDelegationCustomResourceProviderRoleED64687B", + "Arn" + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, "DelegationWithZoneIdCrossAccountZoneDelegationCustomResourceFFD766E7": { "Type": "Custom::CrossAccountZoneDelegation", "Properties": { @@ -127,26 +176,6 @@ { "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" } - ], - "Policies": [ - { - "PolicyName": "Inline", - "PolicyDocument": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "sts:AssumeRole", - "Resource": { - "Fn::GetAtt": [ - "ParentHostedZoneCrossAccountZoneDelegationRole95B1C36E", - "Arn" - ] - } - } - ] - } - } ] } }, @@ -212,6 +241,55 @@ "Name": "anothersub.myzone.com." } }, + "DelegationWithZoneNamecrossaccountzonedelegationhandlerrolePolicy86996882": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "ParentHostedZoneCrossAccountZoneDelegationRole95B1C36E", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "DelegationWithZoneNamecrossaccountzonedelegationhandlerrolePolicy86996882", + "Roles": [ + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "/", + { + "Fn::Select": [ + 5, + { + "Fn::Split": [ + ":", + { + "Fn::GetAtt": [ + "CustomCrossAccountZoneDelegationCustomResourceProviderRoleED64687B", + "Arn" + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + }, "DelegationWithZoneNameCrossAccountZoneDelegationCustomResourceA1A1C94A": { "Type": "Custom::CrossAccountZoneDelegation", "Properties": { diff --git a/packages/@aws-cdk/aws-route53/test/record-set.test.ts b/packages/@aws-cdk/aws-route53/test/record-set.test.ts index 8e380e3dfcd81..7c716545a6f24 100644 --- a/packages/@aws-cdk/aws-route53/test/record-set.test.ts +++ b/packages/@aws-cdk/aws-route53/test/record-set.test.ts @@ -732,4 +732,137 @@ describe('record set', () => { }); + + test('Multiple cross account zone delegation records', () => { + // GIVEN + const stack = new Stack(); + const parentZone = new route53.PublicHostedZone(stack, 'ParentHostedZone', { + zoneName: 'myzone.com', + crossAccountZoneDelegationPrincipal: new iam.AccountPrincipal('123456789012'), + }); + + // WHEN + const childZone = new route53.PublicHostedZone(stack, 'ChildHostedZone', { + zoneName: 'sub.myzone.com', + }); + new route53.CrossAccountZoneDelegationRecord(stack, 'Delegation', { + delegatedZone: childZone, + parentHostedZoneName: 'myzone.com', + delegationRole: parentZone.crossAccountZoneDelegationRole!, + ttl: Duration.seconds(60), + }); + const childZone2 = new route53.PublicHostedZone(stack, 'ChildHostedZone2', { + zoneName: 'anothersub.myzone.com', + }); + new route53.CrossAccountZoneDelegationRecord(stack, 'Delegation2', { + delegatedZone: childZone2, + parentHostedZoneName: 'myzone.com', + delegationRole: 
parentZone.crossAccountZoneDelegationRole!, + ttl: Duration.seconds(60), + }); + + // THEN + const childHostedZones = [ + { name: 'sub.myzone.com', id: 'ChildHostedZone4B14AC71' }, + { name: 'anothersub.myzone.com', id: 'ChildHostedZone2A37198F0' }, + ]; + + for (var childHostedZone of childHostedZones) { + expect(stack).toHaveResource('Custom::CrossAccountZoneDelegation', { + ServiceToken: { + 'Fn::GetAtt': [ + 'CustomCrossAccountZoneDelegationCustomResourceProviderHandler44A84265', + 'Arn', + ], + }, + AssumeRoleArn: { + 'Fn::GetAtt': [ + 'ParentHostedZoneCrossAccountZoneDelegationRole95B1C36E', + 'Arn', + ], + }, + ParentZoneName: 'myzone.com', + DelegatedZoneName: childHostedZone.name, + DelegatedZoneNameServers: { + 'Fn::GetAtt': [ + childHostedZone.id, + 'NameServers', + ], + }, + TTL: 60, + }); + } + }); + + test('Cross account zone delegation policies', () => { + // GIVEN + const stack = new Stack(); + const parentZone = new route53.PublicHostedZone(stack, 'ParentHostedZone', { + zoneName: 'myzone.com', + crossAccountZoneDelegationPrincipal: new iam.AccountPrincipal('123456789012'), + }); + + // WHEN + const childZone = new route53.PublicHostedZone(stack, 'ChildHostedZone', { + zoneName: 'sub.myzone.com', + }); + new route53.CrossAccountZoneDelegationRecord(stack, 'Delegation', { + delegatedZone: childZone, + parentHostedZoneName: 'myzone.com', + delegationRole: parentZone.crossAccountZoneDelegationRole!, + ttl: Duration.seconds(60), + }); + const childZone2 = new route53.PublicHostedZone(stack, 'ChildHostedZone2', { + zoneName: 'anothersub.myzone.com', + }); + new route53.CrossAccountZoneDelegationRecord(stack, 'Delegation2', { + delegatedZone: childZone2, + parentHostedZoneName: 'myzone.com', + delegationRole: parentZone.crossAccountZoneDelegationRole!, + ttl: Duration.seconds(60), + }); + + // THEN + const policyNames = [ + 'DelegationcrossaccountzonedelegationhandlerrolePolicy1E157602', + 'Delegation2crossaccountzonedelegationhandlerrolePolicy713BEAC3', + ]; + + for (var policyName of policyNames) { + expect(stack).toHaveResource('AWS::IAM::Policy', { + PolicyName: policyName, + PolicyDocument: { + Version: '2012-10-17', + Statement: [ + { + Action: 'sts:AssumeRole', + Effect: 'Allow', + Resource: { + 'Fn::GetAtt': [ + 'ParentHostedZoneCrossAccountZoneDelegationRole95B1C36E', + 'Arn', + ], + }, + }, + ], + }, + Roles: [ + { + 'Fn::Select': [1, { + 'Fn::Split': ['/', { + 'Fn::Select': [5, { + 'Fn::Split': [':', { + 'Fn::GetAtt': [ + 'CustomCrossAccountZoneDelegationCustomResourceProviderRoleED64687B', + 'Arn', + ], + }], + }], + }], + }], + }, + ], + }); + } + }); }); From c208e6043e4a184b4d3ac2508ebef1cb31bace43 Mon Sep 17 00:00:00 2001 From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com> Date: Wed, 5 Jan 2022 03:24:19 -0800 Subject: [PATCH 003/374] feat(cfnspec): cloudformation spec v51.0.0 (#18274) Co-authored-by: AWS CDK Team Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- packages/@aws-cdk/cfnspec/CHANGELOG.md | 16 ++++++++++++++++ .../specification/100_sam/000_official/spec.json | 7 +++++++ 2 files changed, 23 insertions(+) diff --git a/packages/@aws-cdk/cfnspec/CHANGELOG.md b/packages/@aws-cdk/cfnspec/CHANGELOG.md index 00bc8552e8367..4f3775440e943 100644 --- a/packages/@aws-cdk/cfnspec/CHANGELOG.md +++ b/packages/@aws-cdk/cfnspec/CHANGELOG.md @@ -1,3 +1,19 @@ + +# Serverless Application Model (SAM) Resource Specification v2016-10-31 + +## New Resource Types + + +## Attribute Changes + + +## Property Changes + 
+
+* AWS::Serverless::Function Architectures (__added__)
+
+## Property Type Changes
+
+
 # CloudFormation Resource Specification v51.0.0
 
 ## New Resource Types
diff --git a/packages/@aws-cdk/cfnspec/spec-source/specification/100_sam/000_official/spec.json b/packages/@aws-cdk/cfnspec/spec-source/specification/100_sam/000_official/spec.json
index 5510475b3e7d2..71c213777a0cf 100644
--- a/packages/@aws-cdk/cfnspec/spec-source/specification/100_sam/000_official/spec.json
+++ b/packages/@aws-cdk/cfnspec/spec-source/specification/100_sam/000_official/spec.json
@@ -1972,6 +1972,13 @@
     "AWS::Serverless::Function": {
       "Documentation": "https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction",
       "Properties": {
+        "Architectures": {
+          "Documentation": "https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html#sam-function-architectures",
+          "PrimitiveItemType": "String",
+          "Required": false,
+          "Type": "List",
+          "UpdateType": "Immutable"
+        },
         "AssumeRolePolicyDocument": {
           "Documentation": "https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-resource-function.html#sam-function-assumerolepolicydocument",
           "PrimitiveType": "Json",

From 6359c12e3242e23d9b3bf0a42cac7c361c8d4d8a Mon Sep 17 00:00:00 2001
From: Romain Marcadier
Date: Wed, 5 Jan 2022 13:10:30 +0100
Subject: [PATCH 004/374] fix(cfn2ts): some property types have behavioral-interface names (#18275)

Some CloudFormation library generated types, such as `@aws-cdk/aws-networkfirewall.CfnRuleGroup.IPSetProperty`, are meant to be jsii structs, but have names that cause them to be handled as jsii behavioral interfaces (`I` followed by a capital letter).

Mangling the names (i.e: changing `IPSet` to `IpSet`) would also work, but is tedious to maintain and has been proven to lead to inadvertent releasing of stable code that does not have the intended "shape" in other languages.

Instead, this uses the jsii type system hints feature introduced a few months ago in the compiler to force those interfaces to be considered as structs regardless of their name.

Fixes https://github.com/aws/jsii/issues/2929 (and probably more)

BREAKING CHANGE: some "complex" property types within the generated CloudFormation interfaces (i.e: properties of `Cfn*` constructs) with names starting with a capital letter `I` followed by another capital letter are no longer incorrectly treated as behavioral interfaces, and might hence have different usage patterns in non-TypeScript languages. Such interfaces were previously very difficult to use in non-TypeScript languages, and required convoluted workarounds, which can now be removed.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 tools/@aws-cdk/cfn2ts/lib/codegen.ts | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tools/@aws-cdk/cfn2ts/lib/codegen.ts b/tools/@aws-cdk/cfn2ts/lib/codegen.ts
index d9c3fac8b6fee..79a33b2c3e46e 100644
--- a/tools/@aws-cdk/cfn2ts/lib/codegen.ts
+++ b/tools/@aws-cdk/cfn2ts/lib/codegen.ts
@@ -117,6 +117,7 @@ export default class CodeGenerator {
         this.docLink(spec.Documentation,
           `Properties for defining a \`${resourceContext.className}\``,
           '',
+          '@struct', // Make this interface ALWAYS be treated as a struct, even if it's named `IPSet...` or something...
           '@stability external');
 
         this.code.openBlock(`export interface ${name.className}`);
 
@@ -861,6 +862,8 @@ export default class CodeGenerator {
     this.docLink(
       propTypeSpec.Documentation,
       docs.description,
+      '',
+      '@struct', // Make this interface ALWAYS be treated as a struct, even if it's named `IPSet...` or something...
       '@stability external',
     );
     /*

From 786886974b171989c67095ea95a29e27441f25ec Mon Sep 17 00:00:00 2001
From: Peter Woodworth <44349620+peterwoodworth@users.noreply.github.com>
Date: Wed, 5 Jan 2022 12:02:48 -0800
Subject: [PATCH 005/374] chore: update node version in readme (#18144)

fixes: #18130

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 CONTRIBUTING.md | 2 +-
 README.md       | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9e7a389384d07..b945ca8395896 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -52,7 +52,7 @@ See [Gitpod section](#gitpod) on how to set up the CDK repo on Gitpod.
 
 The following tools need to be installed on your system prior to installing the CDK:
 
-- [Node.js >= 10.13.0](https://nodejs.org/download/release/latest-v10.x/)
+- [Node.js >= 14.15.0](https://nodejs.org/download/release/latest-v14.x/)
   - We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/)
   - ⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies.
 - [Yarn >= 1.19.1, < 2](https://yarnpkg.com/lang/en/docs/install)
diff --git a/README.md b/README.md
index 6f9bb422faa7d..d018fb8c7a65b 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ infrastructure definition and share it without worrying about boilerplate logic.
 
 The CDK is available in the following languages:
 
-* JavaScript, TypeScript ([Node.js ≥ 10.13.0](https://nodejs.org/download/release/latest-v10.x/))
+* JavaScript, TypeScript ([Node.js ≥ 14.15.0](https://nodejs.org/download/release/latest-v14.x/))
   - We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/)
   - ⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies.
 * Python ([Python ≥ 3.6](https://www.python.org/downloads/))
@@ -77,7 +77,7 @@ in the CDK Developer Guide.
 
 For a detailed walkthrough, see the [tutorial](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html#hello_world_tutorial) in the AWS CDK [Developer Guide](https://docs.aws.amazon.com/cdk/latest/guide/home.html).
 
 ### At a glance
-Install or update the [AWS CDK CLI] from npm (requires [Node.js ≥ 10.13.0](https://nodejs.org/download/release/latest-v10.x/)). We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/)
+Install or update the [AWS CDK CLI] from npm (requires [Node.js ≥ 14.15.0](https://nodejs.org/download/release/latest-v14.x/)). We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/)
 ⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies.
 
 ```console

From cac11bba2ea0714dec8e23b069496d1b9d940685 Mon Sep 17 00:00:00 2001
From: Ian Gilham
Date: Thu, 6 Jan 2022 01:23:56 +0000
Subject: [PATCH 006/374] fix(aws-kinesis): remove default shard count when stream mode is on-demand and set default mode to provisioned (#18221)

Change the default Kinesis Data Stream's stream mode to provisioned from undefined to make the active configuration more explicit in the resulting CloudFormation templates.
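For example (a sketch; `stack` is an assumed existing `Stack`), both declarations below now render an explicit `StreamModeDetails` block in the synthesized template:

```ts
import { Stream, StreamMode } from '@aws-cdk/aws-kinesis';

// Provisioned is now the explicit default, with a shard count of 1; the
// template gains StreamModeDetails: { StreamMode: 'PROVISIONED' }.
new Stream(stack, 'DefaultStream');

// On-demand streams must leave shardCount undefined (see the fix below).
new Stream(stack, 'OnDemandStream', { streamMode: StreamMode.ON_DEMAND });
```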
Fix an issue whereby the shard count is always set when the stream mode is set to on-demand, which is invalid. Shard count still defaults to `1` in provisioned mode, but is left undefined in on-demand mode. Add validation for the above so that an error is thrown from CDK when specifying on-demand mode with a shard count. Fixes #18139 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- ...nteg.dynamodb.kinesis-stream.expected.json | 5 +- .../integ.kinesis-stream.expected.json | 5 +- packages/@aws-cdk/aws-kinesis/lib/stream.ts | 13 +- .../test/integ.stream-dashboard.expected.json | 5 +- .../test/integ.stream.expected.json | 5 +- .../@aws-cdk/aws-kinesis/test/stream.test.ts | 128 +++++++++++++++++- ...elivery-stream.source-stream.expected.json | 5 +- .../test/integ.kinesis.expected.json | 5 +- .../test/integ.kinesiswithdlq.expected.json | 5 +- 9 files changed, 162 insertions(+), 14 deletions(-) diff --git a/packages/@aws-cdk/aws-dynamodb/test/integ.dynamodb.kinesis-stream.expected.json b/packages/@aws-cdk/aws-dynamodb/test/integ.dynamodb.kinesis-stream.expected.json index 77d522466ccf5..8f3ade0dc63d5 100644 --- a/packages/@aws-cdk/aws-dynamodb/test/integ.dynamodb.kinesis-stream.expected.json +++ b/packages/@aws-cdk/aws-dynamodb/test/integ.dynamodb.kinesis-stream.expected.json @@ -4,6 +4,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -73,4 +76,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-events-targets/test/kinesis/integ.kinesis-stream.expected.json b/packages/@aws-cdk/aws-events-targets/test/kinesis/integ.kinesis-stream.expected.json index 4151890c35ef3..babfefc9b5972 100644 --- a/packages/@aws-cdk/aws-events-targets/test/kinesis/integ.kinesis-stream.expected.json +++ b/packages/@aws-cdk/aws-events-targets/test/kinesis/integ.kinesis-stream.expected.json @@ -4,6 +4,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -115,4 +118,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-kinesis/lib/stream.ts b/packages/@aws-cdk/aws-kinesis/lib/stream.ts index 364cb4436f59c..fb1c345a9a844 100644 --- a/packages/@aws-cdk/aws-kinesis/lib/stream.ts +++ b/packages/@aws-cdk/aws-kinesis/lib/stream.ts @@ -673,6 +673,9 @@ export interface StreamProps { /** * The number of shards for the stream. + * + * Can only be provided if streamMode is Provisioned. + * * @default 1 */ readonly shardCount?: number; @@ -752,9 +755,15 @@ export class Stream extends StreamBase { physicalName: props.streamName, }); - const shardCount = props.shardCount || 1; + let shardCount = props.shardCount; + const streamMode = props.streamMode ?? StreamMode.PROVISIONED; - const streamMode = props.streamMode; + if (streamMode === StreamMode.ON_DEMAND && shardCount !== undefined) { + throw new Error(`streamMode must be set to ${StreamMode.PROVISIONED} (default) when specifying shardCount`); + } + if (streamMode === StreamMode.PROVISIONED && shardCount === undefined) { + shardCount = 1; + } const retentionPeriodHours = props.retentionPeriod?.toHours() ?? 
24; if (!Token.isUnresolved(retentionPeriodHours)) { diff --git a/packages/@aws-cdk/aws-kinesis/test/integ.stream-dashboard.expected.json b/packages/@aws-cdk/aws-kinesis/test/integ.stream-dashboard.expected.json index 28a166e76fd1f..19b702e60830d 100644 --- a/packages/@aws-cdk/aws-kinesis/test/integ.stream-dashboard.expected.json +++ b/packages/@aws-cdk/aws-kinesis/test/integ.stream-dashboard.expected.json @@ -4,6 +4,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -203,4 +206,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-kinesis/test/integ.stream.expected.json b/packages/@aws-cdk/aws-kinesis/test/integ.stream.expected.json index 41230acc599a2..e4e0a7b73bd68 100644 --- a/packages/@aws-cdk/aws-kinesis/test/integ.stream.expected.json +++ b/packages/@aws-cdk/aws-kinesis/test/integ.stream.expected.json @@ -72,6 +72,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -110,4 +113,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-kinesis/test/stream.test.ts b/packages/@aws-cdk/aws-kinesis/test/stream.test.ts index cad461b3250ca..8e4fa385133dd 100644 --- a/packages/@aws-cdk/aws-kinesis/test/stream.test.ts +++ b/packages/@aws-cdk/aws-kinesis/test/stream.test.ts @@ -22,6 +22,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -75,6 +78,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -94,6 +100,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -158,6 +167,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 2, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -212,6 +224,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 168, StreamEncryption: { 'Fn::If': [ @@ -283,6 +298,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { EncryptionType: 'KMS', @@ -319,6 +337,9 @@ describe('Kinesis data streams', () => { // THEN expect(stack).toHaveResource('AWS::Kinesis::Stream', { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { EncryptionType: 'KMS', @@ -365,8 +386,11 @@ describe('Kinesis data streams', () => { }); expect(stack).toHaveResource('AWS::Kinesis::Stream', { - RetentionPeriodHours: 24, ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, + RetentionPeriodHours: 24, StreamEncryption: { 
EncryptionType: 'KMS', KeyId: stack.resolve(explicitKey.keyArn), @@ -374,11 +398,11 @@ describe('Kinesis data streams', () => { }); }), - test.each([StreamMode.ON_DEMAND, StreamMode.PROVISIONED])('uses explicit capacity mode %s', (mode: StreamMode) => { + test('uses explicit provisioned streamMode', () => { const stack = new Stack(); new Stream(stack, 'MyStream', { - streamMode: mode, + streamMode: StreamMode.PROVISIONED, }); expect(stack).toMatchTemplate({ @@ -386,10 +410,66 @@ describe('Kinesis data streams', () => { MyStream5C050E93: { Type: 'AWS::Kinesis::Stream', Properties: { + RetentionPeriodHours: 24, ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, + StreamEncryption: { + 'Fn::If': [ + 'AwsCdkKinesisEncryptedStreamsUnsupportedRegions', + { + Ref: 'AWS::NoValue', + }, + { + EncryptionType: 'KMS', + KeyId: 'alias/aws/kinesis', + }, + ], + }, + }, + }, + }, + Conditions: { + AwsCdkKinesisEncryptedStreamsUnsupportedRegions: { + 'Fn::Or': [ + { + 'Fn::Equals': [ + { + Ref: 'AWS::Region', + }, + 'cn-north-1', + ], + }, + { + 'Fn::Equals': [ + { + Ref: 'AWS::Region', + }, + 'cn-northwest-1', + ], + }, + ], + }, + }, + }); + }); + + test('uses explicit on-demand streamMode', () => { + const stack = new Stack(); + + new Stream(stack, 'MyStream', { + streamMode: StreamMode.ON_DEMAND, + }); + + expect(stack).toMatchTemplate({ + Resources: { + MyStream5C050E93: { + Type: 'AWS::Kinesis::Stream', + Properties: { RetentionPeriodHours: 24, StreamModeDetails: { - StreamMode: StreamMode[mode], + StreamMode: StreamMode.ON_DEMAND, }, StreamEncryption: { 'Fn::If': [ @@ -431,6 +511,17 @@ describe('Kinesis data streams', () => { }); }); + test('throws when using shardCount with on-demand streamMode', () => { + const stack = new Stack(); + + expect(() => { + new Stream(stack, 'MyStream', { + shardCount: 2, + streamMode: StreamMode.ON_DEMAND, + }); + }).toThrow(`streamMode must be set to ${StreamMode.PROVISIONED} (default) when specifying shardCount`); + }); + test('grantRead creates and attaches a policy with read only access to the principal', () => { const stack = new Stack(); const stream = new Stream(stack, 'MyStream', { @@ -536,6 +627,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { EncryptionType: 'KMS', @@ -695,6 +789,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { EncryptionType: 'KMS', @@ -845,8 +942,11 @@ describe('Kinesis data streams', () => { MyStream5C050E93: { Type: 'AWS::Kinesis::Stream', Properties: { - RetentionPeriodHours: 24, ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, + RetentionPeriodHours: 24, StreamEncryption: { EncryptionType: 'KMS', KeyId: { @@ -915,6 +1015,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -1003,6 +1106,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -1083,6 +1189,9 @@ describe('Kinesis data streams', () 
=> { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -1173,6 +1282,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -1255,6 +1367,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: 24, StreamEncryption: { 'Fn::If': [ @@ -1372,6 +1487,9 @@ describe('Kinesis data streams', () => { Type: 'AWS::Kinesis::Stream', Properties: { ShardCount: 1, + StreamModeDetails: { + StreamMode: StreamMode.PROVISIONED, + }, RetentionPeriodHours: { Ref: 'myretentionperiod', }, diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.source-stream.expected.json b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.source-stream.expected.json index 896d0487a091c..a37685e2e47e8 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.source-stream.expected.json +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/integ.delivery-stream.source-stream.expected.json @@ -75,6 +75,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -281,4 +284,4 @@ } } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesis.expected.json b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesis.expected.json index c1690f2f03aac..d14d727e34999 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesis.expected.json +++ b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesis.expected.json @@ -116,6 +116,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -154,4 +157,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesiswithdlq.expected.json b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesiswithdlq.expected.json index 616adaef6a86a..a16660f565e76 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesiswithdlq.expected.json +++ b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.kinesiswithdlq.expected.json @@ -140,6 +140,9 @@ "Type": "AWS::Kinesis::Stream", "Properties": { "ShardCount": 1, + "StreamModeDetails": { + "StreamMode": "PROVISIONED" + }, "RetentionPeriodHours": 24, "StreamEncryption": { "Fn::If": [ @@ -203,4 +206,4 @@ ] } } -} \ No newline at end of file +} From ad7374a7475ea4eeb33720f0f07cdd10cd9c9702 Mon Sep 17 00:00:00 2001 From: Pat Myron Date: Thu, 6 Jan 2022 05:44:07 -0800 Subject: [PATCH 007/374] chore(cloudfront): encryption and enforceSSL on distribution s3 loggingBucket (#18264) could pass another bucket, but automatically created buckets are convenient/popular, so worth improving defaults https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-cloudfront.Distribution.html https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-cloudfront.CloudFrontWebDistribution.html https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-s3.Bucket.html 
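As an illustrative sketch only (not part of this diff): the `logBucket` and `enableLogging` properties below are the public `Distribution` API this change affects, while the app, stack, and construct IDs are placeholders. With the improved defaults, the implicitly-created bucket gets S3-managed encryption and an enforce-SSL policy; callers who need different settings can still pass their own bucket:

```ts
import * as cdk from '@aws-cdk/core';
import * as cloudfront from '@aws-cdk/aws-cloudfront';
import * as origins from '@aws-cdk/aws-cloudfront-origins';
import * as s3 from '@aws-cdk/aws-s3';

const app = new cdk.App();
const stack = new cdk.Stack(app, 'LoggingDefaultsDemo');

// Implicit bucket: with this change it is created with S3-managed
// encryption and an enforce-SSL bucket policy by default.
new cloudfront.Distribution(stack, 'DistWithDefaultLogBucket', {
  defaultBehavior: { origin: new origins.HttpOrigin('example.com') },
  enableLogging: true,
});

// Explicit bucket: callers keep full control over the configuration.
new cloudfront.Distribution(stack, 'DistWithExplicitLogBucket', {
  defaultBehavior: { origin: new origins.HttpOrigin('example.com') },
  logBucket: new s3.Bucket(stack, 'MyLogBucket', {
    encryption: s3.BucketEncryption.KMS_MANAGED,
    enforceSSL: true,
  }),
});
```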
--- ```sh # updated integ snapshots packages/@aws-cdk/aws-cloudfront $ /workspace/aws-cdk/tools/\@aws-cdk/cdk-integ-tools/bin/cdk-integ integ.cloudfront-bucket-logging.js integ.distribution-extensive.js --dry-run ``` ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../aws-cloudfront/lib/distribution.ts | 5 +- .../aws-cloudfront/lib/web-distribution.ts | 5 +- ...eg.cloudfront-bucket-logging.expected.json | 58 +++++++++++++++++++ ...integ.distribution-extensive.expected.json | 58 +++++++++++++++++++ 4 files changed, 124 insertions(+), 2 deletions(-) diff --git a/packages/@aws-cdk/aws-cloudfront/lib/distribution.ts b/packages/@aws-cdk/aws-cloudfront/lib/distribution.ts index c593edd9efec7..c13a791bfe9b1 100644 --- a/packages/@aws-cdk/aws-cloudfront/lib/distribution.ts +++ b/packages/@aws-cdk/aws-cloudfront/lib/distribution.ts @@ -430,7 +430,10 @@ export class Distribution extends Resource implements IDistribution { throw new Error('Explicitly disabled logging but provided a logging bucket.'); } - const bucket = props.logBucket ?? new s3.Bucket(this, 'LoggingBucket'); + const bucket = props.logBucket ?? new s3.Bucket(this, 'LoggingBucket', { + encryption: s3.BucketEncryption.S3_MANAGED, + enforceSSL: true, + }); return { bucket: bucket.bucketRegionalDomainName, includeCookies: props.logIncludesCookies, diff --git a/packages/@aws-cdk/aws-cloudfront/lib/web-distribution.ts b/packages/@aws-cdk/aws-cloudfront/lib/web-distribution.ts index c0a332a2e1b89..e590b5740c847 100644 --- a/packages/@aws-cdk/aws-cloudfront/lib/web-distribution.ts +++ b/packages/@aws-cdk/aws-cloudfront/lib/web-distribution.ts @@ -954,7 +954,10 @@ export class CloudFrontWebDistribution extends cdk.Resource implements IDistribu } if (props.loggingConfig) { - this.loggingBucket = props.loggingConfig.bucket || new s3.Bucket(this, 'LoggingBucket'); + this.loggingBucket = props.loggingConfig.bucket || new s3.Bucket(this, 'LoggingBucket', { + encryption: s3.BucketEncryption.S3_MANAGED, + enforceSSL: true, + }); distributionConfig = { ...distributionConfig, logging: { diff --git a/packages/@aws-cdk/aws-cloudfront/test/integ.cloudfront-bucket-logging.expected.json b/packages/@aws-cdk/aws-cloudfront/test/integ.cloudfront-bucket-logging.expected.json index 36a334898a57f..2da6475aff16b 100644 --- a/packages/@aws-cdk/aws-cloudfront/test/integ.cloudfront-bucket-logging.expected.json +++ b/packages/@aws-cdk/aws-cloudfront/test/integ.cloudfront-bucket-logging.expected.json @@ -75,9 +75,67 @@ }, "AnAmazingWebsiteProbably2LoggingBucket222F7CE9": { "Type": "AWS::S3::Bucket", + "Properties": { + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + } + }, "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "AnAmazingWebsiteProbably2LoggingBucketPolicyE298B456": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "AnAmazingWebsiteProbably2LoggingBucket222F7CE9" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "AnAmazingWebsiteProbably2LoggingBucket222F7CE9", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "AnAmazingWebsiteProbably2LoggingBucket222F7CE9", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + 
}, "AnAmazingWebsiteProbably2CFDistribution7C1CCD12": { "Type": "AWS::CloudFront::Distribution", "Properties": { diff --git a/packages/@aws-cdk/aws-cloudfront/test/integ.distribution-extensive.expected.json b/packages/@aws-cdk/aws-cloudfront/test/integ.distribution-extensive.expected.json index 4ddd5ddb8d373..ca4afcdc81c17 100644 --- a/packages/@aws-cdk/aws-cloudfront/test/integ.distribution-extensive.expected.json +++ b/packages/@aws-cdk/aws-cloudfront/test/integ.distribution-extensive.expected.json @@ -2,9 +2,67 @@ "Resources": { "MyDistLoggingBucket9B8976BC": { "Type": "AWS::S3::Bucket", + "Properties": { + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + } + }, "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "MyDistLoggingBucketPolicy847D8D11": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "MyDistLoggingBucket9B8976BC" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "MyDistLoggingBucket9B8976BC", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "MyDistLoggingBucket9B8976BC", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "MyDistDB88FD9A": { "Type": "AWS::CloudFront::Distribution", "Properties": { From 0ed5e85b99563cebfe90d136d1290b936620b9d9 Mon Sep 17 00:00:00 2001 From: Pat Myron Date: Thu, 6 Jan 2022 06:28:37 -0800 Subject: [PATCH 008/374] chore(ec2): enforceSSL on flowLog s3 bucket (#18271) could pass another bucket, but automatically created buckets are convenient/popular, so worth improving defaults https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-ec2.FlowLog.html https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-s3.Bucket.html --- ```sh # updated integ snapshots packages/@aws-cdk/aws-ec2 $ /workspace/aws-cdk/tools/\@aws-cdk/cdk-integ-tools/bin/cdk-integ --dry-run ``` ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-ec2/lib/vpc-flow-logs.ts | 1 + .../test/integ.vpc-flow-logs.expected.json | 47 +++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/packages/@aws-cdk/aws-ec2/lib/vpc-flow-logs.ts b/packages/@aws-cdk/aws-ec2/lib/vpc-flow-logs.ts index fdc4a06ad2227..e7a2b881d91be 100644 --- a/packages/@aws-cdk/aws-ec2/lib/vpc-flow-logs.ts +++ b/packages/@aws-cdk/aws-ec2/lib/vpc-flow-logs.ts @@ -198,6 +198,7 @@ class S3Destination extends FlowLogDestination { if (this.props.s3Bucket === undefined) { s3Bucket = new s3.Bucket(scope, 'Bucket', { encryption: s3.BucketEncryption.UNENCRYPTED, + enforceSSL: true, removalPolicy: RemovalPolicy.RETAIN, }); } else { diff --git a/packages/@aws-cdk/aws-ec2/test/integ.vpc-flow-logs.expected.json b/packages/@aws-cdk/aws-ec2/test/integ.vpc-flow-logs.expected.json index ab9eb13b2c415..5164db5b00faf 100644 --- a/packages/@aws-cdk/aws-ec2/test/integ.vpc-flow-logs.expected.json +++ b/packages/@aws-cdk/aws-ec2/test/integ.vpc-flow-logs.expected.json @@ -527,6 +527,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "VPCFlowLogsS3BucketPolicyB2C2A045": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "VPCFlowLogsS3BucketFB7DC2BE" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + 
"Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "VPCFlowLogsS3BucketFB7DC2BE", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "VPCFlowLogsS3BucketFB7DC2BE", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "VPCFlowLogsS3FlowLogB5256CFF": { "Type": "AWS::EC2::FlowLog", "Properties": { From f475b8496d8f0b59c8293f2601122569400172e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jan 2022 15:24:47 +0000 Subject: [PATCH 009/374] chore(deps): bump actions/setup-node from 2.5.0 to 2.5.1 (#18242) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/setup-node](https://github.com/actions/setup-node) from 2.5.0 to 2.5.1.
Release notes

Sourced from actions/setup-node's releases.

Fix logic of error handling for npm warning and uncaught exception

In the scope of this release we fix the logic of error handling related to caching (actions/setup-node#358 and actions/setup-node#359).

In the previous behaviour we relied on stderr output to throw an error, but warning messages from package managers can be written to stderr. Now the action will throw an error only if the exit code differs from zero. In addition, we add logic to catch and log unhandled exceptions.
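As a rough illustration only (an editor's sketch, not code from the action itself; the function name and use of Node's `child_process` are assumptions), the described policy — surface stderr as a warning, fail only on a non-zero exit code — looks like this:

```ts
import { spawnSync } from 'child_process';

// Sketch: stderr output alone (e.g. npm warnings) is logged but not fatal;
// only a non-zero exit code makes the step fail.
function runPackageManager(cmd: string, args: string[]): string {
  const res = spawnSync(cmd, args, { encoding: 'utf8' });
  if (res.stderr) {
    console.warn(res.stderr.trim()); // warnings are surfaced, not thrown
  }
  if (res.status !== 0) {
    throw new Error(`"${cmd} ${args.join(' ')}" failed with exit code ${res.status}`);
  }
  return res.stdout;
}
```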

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/setup-node&package-manager=github_actions&previous-version=2.5.0&new-version=2.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
--- .github/workflows/yarn-upgrade.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/yarn-upgrade.yml b/.github/workflows/yarn-upgrade.yml index 6b8097ca1a580..f592be929092b 100644 --- a/.github/workflows/yarn-upgrade.yml +++ b/.github/workflows/yarn-upgrade.yml @@ -18,7 +18,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Node - uses: actions/setup-node@v2.5.0 + uses: actions/setup-node@v2.5.1 with: node-version: 12 From ccea06950d8317040bc5439625f5b03e0ebe6721 Mon Sep 17 00:00:00 2001 From: Kyle Laker Date: Thu, 6 Jan 2022 16:02:56 -0500 Subject: [PATCH 010/374] chore: remove node 13 incompatibility warnings (#18284) Because the required minimum version is 14+, warning about specific versions of NodeJS 13 seems redundant (as they are likely incompatible anyway). Relates to #18144 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- CONTRIBUTING.md | 1 - README.md | 2 -- 2 files changed, 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b945ca8395896..92c67747d9692 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -54,7 +54,6 @@ The following tools need to be installed on your system prior to installing the - [Node.js >= 14.15.0](https://nodejs.org/download/release/latest-v14.x/) - We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/) - - ⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies. - [Yarn >= 1.19.1, < 2](https://yarnpkg.com/lang/en/docs/install) - [.NET Core SDK 3.1.x](https://www.microsoft.com/net/download) - [Python >= 3.6.5, < 4.0](https://www.python.org/downloads/release/python-365/) diff --git a/README.md b/README.md index d018fb8c7a65b..4a1eab8dd9eb8 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,6 @@ The CDK is available in the following languages: * JavaScript, TypeScript ([Node.js ≥ 14.15.0](https://nodejs.org/download/release/latest-v14.x/)) - We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/) - - ⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies. * Python ([Python ≥ 3.6](https://www.python.org/downloads/)) * Java ([Java ≥ 8](https://www.oracle.com/technetwork/java/javase/downloads/index.html) and [Maven ≥ 3.5.4](https://maven.apache.org/download.cgi)) * .NET ([.NET Core ≥ 3.1](https://dotnet.microsoft.com/download)) @@ -78,7 +77,6 @@ For a detailed walkthrough, see the [tutorial](https://docs.aws.amazon.com/cdk/l ### At a glance Install or update the [AWS CDK CLI] from npm (requires [Node.js ≥ 14.15.0](https://nodejs.org/download/release/latest-v14.x/)). We recommend using a version in [Active LTS](https://nodejs.org/en/about/releases/) -⚠️ versions `13.0.0` to `13.6.0` are not supported due to compatibility issues with our dependencies. 
```console $ npm i -g aws-cdk From 9da07809437bcbe982c6ce3f8b3f626830d0eba9 Mon Sep 17 00:00:00 2001 From: Pat Myron Date: Thu, 6 Jan 2022 13:49:01 -0800 Subject: [PATCH 011/374] chore(codepipeline): enforce SSL on implicitly-created S3 buckets (#18268) could pass another bucket, but automatically created buckets are convenient/popular, so worth improving defaults https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-codepipeline.Pipeline.html https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-s3.Bucket.html --- ```sh # updated integ snapshots packages/@aws-cdk/aws-events-targets $ /workspace/aws-cdk/tools/\@aws-cdk/cdk-integ-tools/bin/cdk-integ --dry-run packages/@aws-cdk/aws-codepipeline-actions $ /workspace/aws-cdk/tools/\@aws-cdk/cdk-integ-tools/bin/cdk-integ --dry-run packages/@aws-cdk/pipelines $ /workspace/aws-cdk/tools/\@aws-cdk/cdk-integ-tools/bin/cdk-integ --dry-run ``` --- ```sh # eslint fix /workspace/aws-cdk/node_modules/eslint/bin/eslint.js packages/@aws-cdk/aws-codepipeline-actions/test/ --ext=.ts --fix ``` ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../cloudformation-pipeline-actions.test.ts | 13 ++++ ...g.cfn-template-from-repo.lit.expected.json | 69 ++++++++++++++++--- ...yed-through-codepipeline.lit.expected.json | 69 ++++++++++++++++--- .../test/integ.lambda-pipeline.expected.json | 49 ++++++++++++- .../integ.pipeline-alexa-deploy.expected.json | 47 +++++++++++++ .../test/integ.pipeline-cfn.expected.json | 47 +++++++++++++ ...g.pipeline-code-commit-build.expected.json | 69 ++++++++++++++++--- .../integ.pipeline-code-commit.expected.json | 69 ++++++++++++++++--- .../test/integ.pipeline-events.expected.json | 69 ++++++++++++++++--- ...integ.pipeline-stepfunctions.expected.json | 47 +++++++++++++ .../@aws-cdk/aws-codepipeline/lib/pipeline.ts | 1 + .../lib/private/cross-region-support-stack.ts | 1 + .../integ.pipeline-event-target.expected.json | 47 +++++++++++++ .../integ.newpipeline-with-vpc.expected.json | 34 +++++++++ .../test/integ.newpipeline.expected.json | 34 +++++++++ .../integ.pipeline-security.expected.json | 34 +++++++++ ...ne-with-assets-single-upload.expected.json | 34 +++++++++ .../integ.pipeline-with-assets.expected.json | 34 +++++++++ .../test/integ.pipeline.expected.json | 34 +++++++++ .../test/__snapshots__/synth.test.js.snap | 47 +++++++++++++ 20 files changed, 792 insertions(+), 56 deletions(-) diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts index 955e54107789a..e7f46a685ed55 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/cloudformation/cloudformation-pipeline-actions.test.ts @@ -666,6 +666,19 @@ describe('CloudFormation Pipeline Actions', () => { expect(pipelineStack).toHaveResourceLike('AWS::S3::BucketPolicy', { 'PolicyDocument': { 'Statement': [ + { + 'Action': 's3:*', + 'Condition': { + 'Bool': { 'aws:SecureTransport': 'false' }, + }, + 'Effect': 'Deny', + 'Principal': { + 'AWS': '*', + }, + 'Resource': [ + + ], + }, { 'Action': [ 's3:GetObject*', diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.cfn-template-from-repo.lit.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.cfn-template-from-repo.lit.expected.json index 
6d5734f005c70..3c59dc4a9305d 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.cfn-template-from-repo.lit.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.cfn-template-from-repo.lit.expected.json @@ -41,6 +41,20 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { + "Type": "AWS::KMS::Alias", + "Properties": { + "AliasName": "alias/codepipeline-awscdkcodepipelinecloudformationpipeline7dbde619", + "TargetKeyId": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKey01D58D69", + "Arn" + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, "PipelineArtifactsBucket22248F97": { "Type": "AWS::S3::Bucket", "Properties": { @@ -69,19 +83,52 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, - "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { - "Type": "AWS::KMS::Alias", + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", "Properties": { - "AliasName": "alias/codepipeline-awscdkcodepipelinecloudformationpipeline7dbde619", - "TargetKeyId": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKey01D58D69", - "Arn" - ] + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" } - }, - "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + } }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-deployed-through-codepipeline.lit.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-deployed-through-codepipeline.lit.expected.json index 29afc8317c758..61cef35a009c0 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-deployed-through-codepipeline.lit.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-deployed-through-codepipeline.lit.expected.json @@ -35,6 +35,20 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { + "Type": "AWS::KMS::Alias", + "Properties": { + "AliasName": "alias/codepipeline-pipelinestackpipeline9db740af", + "TargetKeyId": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKey01D58D69", + "Arn" + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, "PipelineArtifactsBucket22248F97": { "Type": "AWS::S3::Bucket", "Properties": { @@ -63,19 +77,52 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, - "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { - "Type": "AWS::KMS::Alias", + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", "Properties": { - "AliasName": "alias/codepipeline-pipelinestackpipeline9db740af", - "TargetKeyId": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKey01D58D69", - "Arn" - ] + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + 
"Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" } - }, - "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + } }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-pipeline.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-pipeline.expected.json index b925c611a0591..53614fd854b19 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-pipeline.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.lambda-pipeline.expected.json @@ -77,6 +77,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", "Properties": { @@ -788,4 +835,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-alexa-deploy.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-alexa-deploy.expected.json index db1378dc62f7c..6662d025f667b 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-alexa-deploy.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-alexa-deploy.expected.json @@ -87,6 +87,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", "Properties": { diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-cfn.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-cfn.expected.json index 47d57c1301cb4..707f673e11ea1 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-cfn.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-cfn.expected.json @@ -77,6 +77,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + 
"aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", "Properties": { diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit-build.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit-build.expected.json index 410001cabd59b..53dffb9a5b78c 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit-build.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit-build.expected.json @@ -295,6 +295,20 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { + "Type": "AWS::KMS::Alias", + "Properties": { + "AliasName": "alias/codepipeline-awscdkcodepipelinecodecommitcodebuildpipeline9540e1f5", + "TargetKeyId": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKey01D58D69", + "Arn" + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, "PipelineArtifactsBucket22248F97": { "Type": "AWS::S3::Bucket", "Properties": { @@ -323,19 +337,52 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, - "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { - "Type": "AWS::KMS::Alias", + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", "Properties": { - "AliasName": "alias/codepipeline-awscdkcodepipelinecodecommitcodebuildpipeline9540e1f5", - "TargetKeyId": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKey01D58D69", - "Arn" - ] + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" } - }, - "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + } }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit.expected.json index 5bd2974d1ceb8..ed452beed9f7a 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-code-commit.expected.json @@ -106,6 +106,20 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { + "Type": "AWS::KMS::Alias", + "Properties": { + "AliasName": "alias/codepipeline-awscdkcodepipelinecodecommitpipelinef780ca18", + "TargetKeyId": { + "Fn::GetAtt": [ + "PipelineArtifactsBucketEncryptionKey01D58D69", + "Arn" + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, "PipelineArtifactsBucket22248F97": { "Type": "AWS::S3::Bucket", "Properties": { @@ -134,19 +148,52 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, - 
"PipelineArtifactsBucketEncryptionKeyAlias5C510EEE": { - "Type": "AWS::KMS::Alias", + "PipelineArtifactsBucketPolicyD4F9712A": { + "Type": "AWS::S3::BucketPolicy", "Properties": { - "AliasName": "alias/codepipeline-awscdkcodepipelinecodecommitpipelinef780ca18", - "TargetKeyId": { - "Fn::GetAtt": [ - "PipelineArtifactsBucketEncryptionKey01D58D69", - "Arn" - ] + "Bucket": { + "Ref": "PipelineArtifactsBucket22248F97" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucket22248F97", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" } - }, - "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + } }, "PipelineRoleD68726F7": { "Type": "AWS::IAM::Role", diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-events.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-events.expected.json index 19be710545e7e..d464eef509bdd 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-events.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-events.expected.json @@ -35,6 +35,20 @@ "UpdateReplacePolicy": "Delete", "DeletionPolicy": "Delete" }, + "MyPipelineArtifactsBucketEncryptionKeyAlias9D4F8C59": { + "Type": "AWS::KMS::Alias", + "Properties": { + "AliasName": "alias/codepipeline-awscdkpipelineeventtargetmypipeline4ae5d407", + "TargetKeyId": { + "Fn::GetAtt": [ + "MyPipelineArtifactsBucketEncryptionKey8BF0A7F3", + "Arn" + ] + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, "MyPipelineArtifactsBucket727923DD": { "Type": "AWS::S3::Bucket", "Properties": { @@ -63,19 +77,52 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, - "MyPipelineArtifactsBucketEncryptionKeyAlias9D4F8C59": { - "Type": "AWS::KMS::Alias", + "MyPipelineArtifactsBucketPolicyDFDA675B": { + "Type": "AWS::S3::BucketPolicy", "Properties": { - "AliasName": "alias/codepipeline-awscdkpipelineeventtargetmypipeline4ae5d407", - "TargetKeyId": { - "Fn::GetAtt": [ - "MyPipelineArtifactsBucketEncryptionKey8BF0A7F3", - "Arn" - ] + "Bucket": { + "Ref": "MyPipelineArtifactsBucket727923DD" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "MyPipelineArtifactsBucket727923DD", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "MyPipelineArtifactsBucket727923DD", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" } - }, - "UpdateReplacePolicy": "Delete", - "DeletionPolicy": "Delete" + } }, "MyPipelineRoleC0D47CA4": { "Type": "AWS::IAM::Role", diff --git a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-stepfunctions.expected.json b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-stepfunctions.expected.json index 03e04ca5348b1..fe94e8c305ad7 100644 --- a/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-stepfunctions.expected.json +++ b/packages/@aws-cdk/aws-codepipeline-actions/test/integ.pipeline-stepfunctions.expected.json @@ -120,6 +120,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + 
"MyPipelineArtifactsBucketPolicyDFDA675B": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "MyPipelineArtifactsBucket727923DD" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "MyPipelineArtifactsBucket727923DD", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "MyPipelineArtifactsBucket727923DD", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "MyPipelineRoleC0D47CA4": { "Type": "AWS::IAM::Role", "Properties": { diff --git a/packages/@aws-cdk/aws-codepipeline/lib/pipeline.ts b/packages/@aws-cdk/aws-codepipeline/lib/pipeline.ts index 7e02b83d03939..6dad03744c8e7 100644 --- a/packages/@aws-cdk/aws-codepipeline/lib/pipeline.ts +++ b/packages/@aws-cdk/aws-codepipeline/lib/pipeline.ts @@ -399,6 +399,7 @@ export class Pipeline extends PipelineBase { bucketName: PhysicalName.GENERATE_IF_NEEDED, encryptionKey, encryption: encryptionKey ? s3.BucketEncryption.KMS : s3.BucketEncryption.KMS_MANAGED, + enforceSSL: true, blockPublicAccess: new s3.BlockPublicAccess(s3.BlockPublicAccess.BLOCK_ALL), removalPolicy: RemovalPolicy.RETAIN, }); diff --git a/packages/@aws-cdk/aws-codepipeline/lib/private/cross-region-support-stack.ts b/packages/@aws-cdk/aws-codepipeline/lib/private/cross-region-support-stack.ts index 9ab45f8942436..5decade872f1e 100644 --- a/packages/@aws-cdk/aws-codepipeline/lib/private/cross-region-support-stack.ts +++ b/packages/@aws-cdk/aws-codepipeline/lib/private/cross-region-support-stack.ts @@ -77,6 +77,7 @@ export class CrossRegionSupportConstruct extends Construct { bucketName: cdk.PhysicalName.GENERATE_IF_NEEDED, encryption: encryptionAlias ? 
s3.BucketEncryption.KMS : s3.BucketEncryption.KMS_MANAGED, encryptionKey: encryptionAlias, + enforceSSL: true, blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL, }); } diff --git a/packages/@aws-cdk/aws-events-targets/test/codepipeline/integ.pipeline-event-target.expected.json b/packages/@aws-cdk/aws-events-targets/test/codepipeline/integ.pipeline-event-target.expected.json index 7f2c9d48da34b..bc6bec13d1d5f 100644 --- a/packages/@aws-cdk/aws-events-targets/test/codepipeline/integ.pipeline-event-target.expected.json +++ b/packages/@aws-cdk/aws-events-targets/test/codepipeline/integ.pipeline-event-target.expected.json @@ -83,6 +83,53 @@ "UpdateReplacePolicy": "Retain", "DeletionPolicy": "Retain" }, + "pipelinePipeline22F2A91DArtifactsBucketPolicy269103C2": { + "Type": "AWS::S3::BucketPolicy", + "Properties": { + "Bucket": { + "Ref": "pipelinePipeline22F2A91DArtifactsBucketC1799DCD" + }, + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "pipelinePipeline22F2A91DArtifactsBucketC1799DCD", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "pipelinePipeline22F2A91DArtifactsBucketC1799DCD", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + } + } + }, "pipelinePipeline22F2A91DRole58B7B05E": { "Type": "AWS::IAM::Role", "Properties": { diff --git a/packages/@aws-cdk/pipelines/test/integ.newpipeline-with-vpc.expected.json b/packages/@aws-cdk/pipelines/test/integ.newpipeline-with-vpc.expected.json index 1180a0c03f971..0ea92e8bd1fe0 100644 --- a/packages/@aws-cdk/pipelines/test/integ.newpipeline-with-vpc.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.newpipeline-with-vpc.expected.json @@ -544,6 +544,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/@aws-cdk/pipelines/test/integ.newpipeline.expected.json b/packages/@aws-cdk/pipelines/test/integ.newpipeline.expected.json index 13a2fa4b5a954..c73962569d56f 100644 --- a/packages/@aws-cdk/pipelines/test/integ.newpipeline.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.newpipeline.expected.json @@ -30,6 +30,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/@aws-cdk/pipelines/test/integ.pipeline-security.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline-security.expected.json index 7f9c7a276e8b6..996f6abad6abc 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline-security.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline-security.expected.json @@ -103,6 +103,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + 
"Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "TestPipelineArtifactsBucket026AF2F9", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "TestPipelineArtifactsBucket026AF2F9", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets-single-upload.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets-single-upload.expected.json index 1e3b8da882e14..f7be2a6cc06e5 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets-single-upload.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets-single-upload.expected.json @@ -103,6 +103,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json index 86ea5b197c1fe..4137af5b8b0c6 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline-with-assets.expected.json @@ -103,6 +103,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json b/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json index 0cdaf3a38943d..8f2e83582be38 100644 --- a/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json +++ b/packages/@aws-cdk/pipelines/test/integ.pipeline.expected.json @@ -103,6 +103,40 @@ }, "PolicyDocument": { "Statement": [ + { + "Action": "s3:*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + }, + "Effect": "Deny", + "Principal": { + "AWS": "*" + }, + "Resource": [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "PipelineArtifactsBucketAEA9A052", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, { "Action": [ "s3:GetObject*", diff --git a/packages/decdk/test/__snapshots__/synth.test.js.snap b/packages/decdk/test/__snapshots__/synth.test.js.snap index 433eca7032550..93c4dfdf336c4 100644 --- a/packages/decdk/test/__snapshots__/synth.test.js.snap +++ b/packages/decdk/test/__snapshots__/synth.test.js.snap @@ -1962,6 +1962,53 @@ Object { "Type": "AWS::KMS::Alias", "UpdateReplacePolicy": "Delete", }, + "PipelineArtifactsBucketPolicyD4F9712A": Object { + "Properties": Object { + "Bucket": Object { + "Ref": "PipelineArtifactsBucket22248F97", + }, + "PolicyDocument": Object { + "Statement": Array [ + Object { + "Action": "s3:*", + "Condition": Object { + "Bool": Object { + "aws:SecureTransport": 
"false", + }, + }, + "Effect": "Deny", + "Principal": Object { + "AWS": "*", + }, + "Resource": Array [ + Object { + "Fn::GetAtt": Array [ + "PipelineArtifactsBucket22248F97", + "Arn", + ], + }, + Object { + "Fn::Join": Array [ + "", + Array [ + Object { + "Fn::GetAtt": Array [ + "PipelineArtifactsBucket22248F97", + "Arn", + ], + }, + "/*", + ], + ], + }, + ], + }, + ], + "Version": "2012-10-17", + }, + }, + "Type": "AWS::S3::BucketPolicy", + }, "PipelineBuildCodePipelineActionRoleD77A08E6": Object { "Properties": Object { "AssumeRolePolicyDocument": Object { From 272b6b1abe22b7415eed5cdba82056d154fc31d7 Mon Sep 17 00:00:00 2001 From: wplucinsky Date: Thu, 6 Jan 2022 14:35:25 -0800 Subject: [PATCH 012/374] fix(appmesh): allow a Virtual Node have as a backend a Virtual Service whose provider is that Node (#18265) Addresses a circular dependency issue between Virtual Nodes and Virtual Services that works for Virtual Services created with a defined `virtualServiceName` and a randomly generated name. One such example of this problem was a case where a Virtual Node had a backend that is a Virtual Service whose provider was given as the same Virtual Node. This led to the Virtual Node being dependent on the creation of the Virtual Service, and the Virtual Service being dependent on the creation of the Virtual Node. Fixes #17322 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../integ.all-service-addons.expected.json | 14 +--- packages/@aws-cdk/aws-appmesh/README.md | 18 +++++ .../aws-appmesh/lib/shared-interfaces.ts | 9 ++- .../aws-appmesh/test/integ.mesh.expected.json | 28 +------ .../@aws-cdk/aws-appmesh/test/mesh.test.ts | 4 +- .../aws-appmesh/test/virtual-node.test.ts | 81 ++++++++++++++++--- 6 files changed, 103 insertions(+), 51 deletions(-) diff --git a/packages/@aws-cdk-containers/ecs-service-extensions/test/integ.all-service-addons.expected.json b/packages/@aws-cdk-containers/ecs-service-extensions/test/integ.all-service-addons.expected.json index bd9224e586933..04e3fb87689fa 100644 --- a/packages/@aws-cdk-containers/ecs-service-extensions/test/integ.all-service-addons.expected.json +++ b/packages/@aws-cdk-containers/ecs-service-extensions/test/integ.all-service-addons.expected.json @@ -3185,22 +3185,12 @@ "Backends": [ { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "namevirtualservice3DDDDF1E", - "VirtualServiceName" - ] - } + "VirtualServiceName": "name.production" } }, { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "greetingvirtualservice60AD3AD9", - "VirtualServiceName" - ] - } + "VirtualServiceName": "greeting.production" } } ], diff --git a/packages/@aws-cdk/aws-appmesh/README.md b/packages/@aws-cdk/aws-appmesh/README.md index 54bc79fade4c3..d168d56c73a7e 100644 --- a/packages/@aws-cdk/aws-appmesh/README.md +++ b/packages/@aws-cdk/aws-appmesh/README.md @@ -236,6 +236,24 @@ The `backends` property can be added with `node.addBackend()`. In the example, w The `backendDefaults` property is added to the node while creating the virtual node. These are the virtual node's default settings for all backends. 
+The `VirtualNode.addBackend()` method is especially useful if you want to create a circular traffic flow by having a Virtual Service as a backend whose provider is that same Virtual Node: + +```ts +declare const mesh: appmesh.Mesh; + +const node = new appmesh.VirtualNode(this, 'node', { + mesh, + serviceDiscovery: appmesh.ServiceDiscovery.dns('node'), +}); + +const virtualService = new appmesh.VirtualService(this, 'service-1', { + virtualServiceProvider: appmesh.VirtualServiceProvider.virtualNode(node), + virtualServiceName: 'service1.domain.local', +}); + +node.addBackend(appmesh.Backend.virtualService(virtualService)); +``` + ### Adding TLS to a listener The `tls` property specifies TLS configuration when creating a listener for a virtual node or a virtual gateway. diff --git a/packages/@aws-cdk/aws-appmesh/lib/shared-interfaces.ts b/packages/@aws-cdk/aws-appmesh/lib/shared-interfaces.ts index 8e672ac535bdc..7598ac959e98b 100644 --- a/packages/@aws-cdk/aws-appmesh/lib/shared-interfaces.ts +++ b/packages/@aws-cdk/aws-appmesh/lib/shared-interfaces.ts @@ -238,7 +238,14 @@ class VirtualServiceBackend extends Backend { return { virtualServiceBackend: { virtualService: { - virtualServiceName: this.virtualService.virtualServiceName, + /** + * We want to use the name of the Virtual Service here directly instead of + * a `{ 'Fn::GetAtt' }` CFN expression. This avoids a circular dependency in + * the case where this Virtual Node is the Virtual Service's provider. + */ + virtualServiceName: cdk.Token.isUnresolved(this.virtualService.virtualServiceName) + ? (this.virtualService as any).physicalName + : this.virtualService.virtualServiceName, clientPolicy: this.tlsClientPolicy ? { tls: renderTlsClientPolicy(scope, this.tlsClientPolicy), diff --git a/packages/@aws-cdk/aws-appmesh/test/integ.mesh.expected.json b/packages/@aws-cdk/aws-appmesh/test/integ.mesh.expected.json index 4b6c3e54f543e..3649195cf9c7e 100644 --- a/packages/@aws-cdk/aws-appmesh/test/integ.mesh.expected.json +++ b/packages/@aws-cdk/aws-appmesh/test/integ.mesh.expected.json @@ -1040,22 +1040,12 @@ "Backends": [ { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "service6D174F83", - "VirtualServiceName" - ] - } + "VirtualServiceName": "service1.domain.local" } }, { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "service27C65CF7D", - "VirtualServiceName" - ] - } + "VirtualServiceName": "service2.domain.local" } } ], @@ -1111,12 +1101,7 @@ "Backends": [ { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "service3859EB104", - "VirtualServiceName" - ] - } + "VirtualServiceName": "service3.domain.local" } } ], @@ -1241,12 +1226,7 @@ "Backends": [ { "VirtualService": { - "VirtualServiceName": { - "Fn::GetAtt": [ - "service4983B61EE", - "VirtualServiceName" - ] - } + "VirtualServiceName": "service4.domain.local" } } ], diff --git a/packages/@aws-cdk/aws-appmesh/test/mesh.test.ts b/packages/@aws-cdk/aws-appmesh/test/mesh.test.ts index f30d1562416da..294701e0d550a 100644 --- a/packages/@aws-cdk/aws-appmesh/test/mesh.test.ts +++ b/packages/@aws-cdk/aws-appmesh/test/mesh.test.ts @@ -314,9 +314,7 @@ describe('mesh', () => { Backends: [ { VirtualService: { - VirtualServiceName: { - 'Fn::GetAtt': ['service1A48078CF', 'VirtualServiceName'], - }, + VirtualServiceName: 'service1.domain.local', }, }, ], diff --git a/packages/@aws-cdk/aws-appmesh/test/virtual-node.test.ts b/packages/@aws-cdk/aws-appmesh/test/virtual-node.test.ts index 82bc98c6267ef..842c061f68a15 100644 --- 
a/packages/@aws-cdk/aws-appmesh/test/virtual-node.test.ts +++ b/packages/@aws-cdk/aws-appmesh/test/virtual-node.test.ts @@ -41,24 +41,18 @@ describe('virtual node', () => { Backends: [ { VirtualService: { - VirtualServiceName: { - 'Fn::GetAtt': ['service1A48078CF', 'VirtualServiceName'], - }, + VirtualServiceName: 'service1.domain.local', }, }, { VirtualService: { - VirtualServiceName: { - 'Fn::GetAtt': ['service27C65CF7D', 'VirtualServiceName'], - }, + VirtualServiceName: 'service2.domain.local', }, }, ], }, MeshOwner: ABSENT, }); - - }); }); @@ -458,9 +452,7 @@ describe('virtual node', () => { Backends: [ { VirtualService: { - VirtualServiceName: { - 'Fn::GetAtt': ['service1A48078CF', 'VirtualServiceName'], - }, + VirtualServiceName: 'service1.domain.local', ClientPolicy: { TLS: { Ports: [8080, 8081], @@ -478,8 +470,75 @@ describe('virtual node', () => { ], }, }); + }); + + test('you can add a Virtual Service as a backend to a Virtual Node which is the provider for that Virtual Service', () => { + // GIVEN + const stack = new cdk.Stack(); + + // WHEN + const mesh = new appmesh.Mesh(stack, 'mesh', { + meshName: 'test-mesh', + }); + + const node = new appmesh.VirtualNode(stack, 'test-node', { + mesh, + serviceDiscovery: appmesh.ServiceDiscovery.dns('test'), + }); + + const myVirtualService = new appmesh.VirtualService(stack, 'service-1', { + virtualServiceProvider: appmesh.VirtualServiceProvider.virtualNode(node), + virtualServiceName: 'service1.domain.local', + }); + + node.addBackend(appmesh.Backend.virtualService(myVirtualService)); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppMesh::VirtualNode', { + Spec: { + Backends: [ + { + VirtualService: { + VirtualServiceName: 'service1.domain.local', + }, + }, + ], + }, + }); + }); + + test('you can add a Virtual Service with an automated name as a backend to a Virtual Node which is the provider for that Virtual Service, ', () => { + // GIVEN + const stack = new cdk.Stack(); + // WHEN + const mesh = new appmesh.Mesh(stack, 'mesh', { + meshName: 'test-mesh', + }); + const node = new appmesh.VirtualNode(stack, 'test-node', { + mesh, + serviceDiscovery: appmesh.ServiceDiscovery.dns('test'), + }); + + const myVirtualService = new appmesh.VirtualService(stack, 'service-1', { + virtualServiceProvider: appmesh.VirtualServiceProvider.virtualNode(node), + }); + + node.addBackend(appmesh.Backend.virtualService(myVirtualService)); + + // THEN + expect(stack).toHaveResourceLike('AWS::AppMesh::VirtualNode', { + Spec: { + Backends: [ + { + VirtualService: { + VirtualServiceName: 'service1', + }, + }, + ], + }, + }); }); }); From 7f4eb764d4a5142debd932e1f2b75116a7c8e2d1 Mon Sep 17 00:00:00 2001 From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com> Date: Fri, 7 Jan 2022 01:47:58 -0800 Subject: [PATCH 013/374] docs(cfnspec): update CloudFormation documentation (#18303) Co-authored-by: AWS CDK Team --- .../spec-source/cfn-docs/cfn-docs.json | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json index 8836c7612e0dc..8340dcdfab2e6 100644 --- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json +++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json @@ -6989,7 +6989,7 @@ "description": "Use event selectors to further specify the management and data event settings for your trail. 
By default, trails created without specific event selectors will be configured to log all read and write management events, and no data events. When an event occurs in your account, CloudTrail evaluates the event selector for all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.\n\nYou can configure up to five event selectors for a trail.\n\nYou cannot apply both event selectors and advanced event selectors to a trail.", "properties": { "DataResources": "CloudTrail supports data event logging for Amazon S3 objects and AWS Lambda functions. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. This limit does not apply if you configure resource logging for all data events.\n\nFor more information, see [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-data-events) and [Limits in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) in the *AWS CloudTrail User Guide* .", - "ExcludeManagementEventSources": "An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service or Amazon RDS Data API events by containing `kms.amazonaws.com` or `rdsdata.amazonaws.com` . By default, `ExcludeManagementEventSources` is empty, and AWS KMS and Amazon RDS Data API events are logged to your trail. You can exclude management event sources only in regions that support the event source.", + "ExcludeManagementEventSources": "An optional list of service event sources from which you do not want management events to be logged on your trail. In this release, the list can be empty (disables the filter), or it can filter out AWS Key Management Service or Amazon RDS Data API events by containing `kms.amazonaws.com` or `rdsdata.amazonaws.com` . By default, `ExcludeManagementEventSources` is empty, and AWS KMS and Amazon RDS Data API events are logged to your trail.", "IncludeManagementEvents": "Specify if you want your event selector to include management events for your trail.\n\nFor more information, see [Management Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html#logging-management-events) in the *AWS CloudTrail User Guide* .\n\nBy default, the value is `true` .\n\nThe first copy of management events is free. You are charged for additional copies of management events that you are logging on any subsequent trail in the same region. For more information about CloudTrail pricing, see [AWS CloudTrail Pricing](https://docs.aws.amazon.com/cloudtrail/pricing/) .", "ReadWriteType": "Specify if you want your trail to log read-only events, write-only events, or all. For example, the EC2 `GetConsoleOutput` is a read-only API operation and `RunInstances` is a write-only API operation.\n\nBy default, the value is `All` ." } @@ -14221,7 +14221,7 @@ "description": "The Kubernetes network configuration for the cluster.", "properties": { "IpFamily": "", - "ServiceIpv4Cidr": "The CIDR block to assign Kubernetes service IP addresses from. 
If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. The block must meet the following requirements:\n\n- Within one of the following private IP address blocks: 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16.\n- Doesn't overlap with any CIDR block assigned to the VPC that you selected for VPC.\n- Between /24 and /12.\n\n> You can only specify a custom CIDR block when you create a cluster and can't change this value once the cluster is created.", + "ServiceIpv4Cidr": "Don't specify a value if you select `ipv6` for *ipFamily* . The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. The block must meet the following requirements:\n\n- Within one of the following private IP address blocks: 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16.\n- Doesn't overlap with any CIDR block assigned to the VPC that you selected for VPC.\n- Between /24 and /12.\n\n> You can only specify a custom CIDR block when you create a cluster and can't change this value once the cluster is created.", "ServiceIpv6Cidr": "" } }, @@ -14288,9 +14288,9 @@ "NodegroupName": "The name associated with an Amazon EKS managed node group.", "Ref": "`Ref` returns the resource name. For example:\n\n`{ \"Ref\": \"myNodegroup\" }`\n\nFor the Amazon EKS node group `myNodegroup` , Ref returns the physical resource ID of the node group. For example, `/` ." }, - "description": "Creates a managed node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster.\n\nAn Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) in the *Amazon EKS User Guide* .", + "description": "Creates a managed node group for an Amazon EKS cluster. You can only create a node group for your cluster that is equal to the current Kubernetes version for the cluster. All node groups are created with the latest AMI release version for the respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using a launch template. For more information about using launch templates, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) .\n\nAn Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that are managed by AWS for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) in the *Amazon EKS User Guide* .", "properties": { - "AmiType": "The AMI type for your node group. 
The following values are examples:\n\n- `AL2_x86_64` \u2013 Use for Amazon Linux 2 non-GPU instances.\n- `AL2_x86_64_GPU` \u2013 Use for Amazon Linux 2 GPU instances.\n- `AL2_ARM_64` \u2013 Use for Amazon Linux 2 Arm instances.\n- `CUSTOM` \u2013 Use when specifying a custom AMI ID with a launch template. For more information, see [Specifying an AMI](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami) in the *Amazon EKS User Guide* .\n- `BOTTLEROCKET_ARM_64` \u2013 Use for Bottlerocket Arm instances.\n- `BOTTLEROCKET_x86_64` \u2013 Use for Bottlerocket x86_64 instances.\n\nIf you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", + "AmiType": "The AMI type for your node group. GPU instance types should use the `AL2_x86_64_GPU` AMI type. Non-GPU instances should use the `AL2_x86_64` AMI type. Arm instances should use the `AL2_ARM_64` AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI. If you specify `launchTemplate` , and your launch template uses a custom AMI, then don't specify `amiType` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", "CapacityType": "The capacity type of your managed node group.", "ClusterName": "The name of the cluster to create the node group in.", "DiskSize": "The root device disk size (in GiB) for your node group instances. The default disk size is 20 GiB. If you specify `launchTemplate` , then don't specify `diskSize` , or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see [Launch template support](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) in the *Amazon EKS User Guide* .", @@ -20326,7 +20326,7 @@ "properties": { "DisplayName": "Field that represents a friendly name in the console for the custom metric; it doesn't have to be unique. Don't use this name as the metric identifier in the device metric report. Can be updated.", "MetricName": "The name of the custom metric. This will be used in the metric report submitted from the device/thing. It shouldn't begin with `aws:` . Cannot be updated once it's defined.", - "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , `number-list` , and `number` .", + "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , and `number-list` .", "Tags": "Metadata that can be used to manage the custom metric." } }, @@ -20430,13 +20430,13 @@ }, "AWS::IoT::Logging": { "attributes": { - "Ref": "`Ref` returns\n\n`{ \"Ref\": \"AccountId\" }`" + "Ref": "" }, - "description": "Sets the logging options in the V2 logging service.", + "description": "", "properties": { - "AccountId": "The unique identifier of the account to use when writing to CloudWatch logs.", - "DefaultLogLevel": "The logging level. Valid values are `DEBUG` , `INFO` , `ERROR` , `WARN` , and `DISABLED` .", - "RoleArn": "The ARN of the role that allows IoT to write to Cloudwatch logs." 
+ "AccountId": "", + "DefaultLogLevel": "", + "RoleArn": "" } }, "AWS::IoT::MitigationAction": { @@ -20554,14 +20554,14 @@ }, "AWS::IoT::ResourceSpecificLogging": { "attributes": { - "Ref": "`Ref` returns\n\n`{ \"Ref\": \"TargetType:TargetName\" }`", - "TargetId": "The unique identifier of the log target." + "Ref": "", + "TargetId": "" }, - "description": "Sets the logging options for a specific resource in the V2 logging service.", + "description": "", "properties": { - "LogLevel": "The logging level. Valid values are `DEBUG` , `INFO` , `ERROR` , `WARN` , and `DISABLED` .", - "TargetName": "The log target name.", - "TargetType": "The log target type." + "LogLevel": "", + "TargetName": "", + "TargetType": "" } }, "AWS::IoT::ScheduledAudit": { From aff607a65e061ade5c3ec9e29f82fdaa8b57f638 Mon Sep 17 00:00:00 2001 From: Otavio Macedo Date: Fri, 7 Jan 2022 12:34:22 +0000 Subject: [PATCH 014/374] fix(lambda-python): asset files are generated inside the 'asset-input' folder (#18306) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes the asset structure from: ``` ├── asset.993168cfa75b295eeea55bf603340284b9be46ebc079e4965f2c16f5470efda5 │ └── asset-input │ ├── __init__.py │ └── app.py ``` to: ``` ├── asset.993168cfa75b295eeea55bf603340284b9be46ebc079e4965f2c16f5470efda5 │ ├── __init__.py │ └── app.py ``` Fixes https://github.com/aws/aws-cdk/issues/18301. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-lambda-python/lib/bundling.ts | 4 ++-- .../aws-lambda-python/test/bundling.test.ts | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts b/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts index 37e52ac13435b..34dd54e3eee98 100644 --- a/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts +++ b/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts @@ -86,8 +86,8 @@ export class Bundling implements CdkBundlingOptions { bundlingCommands.push(packaging.exportCommand ?? 
''); if (packaging.dependenciesFile) { bundlingCommands.push(`python -m pip install -r ${DependenciesFile.PIP} -t ${options.outputDir}`); - }; - bundlingCommands.push(`cp -R ${options.inputDir} ${options.outputDir}`); + } + bundlingCommands.push(`cp -R ${options.inputDir}/ ${options.outputDir}`); return bundlingCommands; } } diff --git a/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts b/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts index 5e4cf194f5151..e5f91b2ea09b8 100644 --- a/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts +++ b/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts @@ -36,7 +36,7 @@ test('Bundling a function without dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'cp -R /asset-input /asset-output', + 'cp -R /asset-input/ /asset-output', ], }), })); @@ -62,7 +62,7 @@ test('Bundling a function with requirements.txt', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input /asset-output', + 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input/ /asset-output', ], }), })); @@ -81,7 +81,7 @@ test('Bundling Python 2.7 with requirements.txt installed', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input /asset-output', + 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input/ /asset-output', ], }), })); @@ -101,7 +101,7 @@ test('Bundling a layer with dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -121,7 +121,7 @@ test('Bundling a python code layer', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'cp -R /asset-input /asset-output/python', + 'cp -R /asset-input/ /asset-output/python', ], }), })); @@ -141,7 +141,7 @@ test('Bundling a function with pipenv dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'PIPENV_VENV_IN_PROJECT=1 pipenv lock -r > requirements.txt && rm -rf .venv && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'PIPENV_VENV_IN_PROJECT=1 pipenv lock -r > requirements.txt && rm -rf .venv && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -161,7 +161,7 @@ test('Bundling a function with poetry dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'poetry export --with-credentials --format requirements.txt --output requirements.txt && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'poetry export --with-credentials --format requirements.txt --output requirements.txt && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -184,7 +184,7 @@ test('Bundling a function with custom bundling image', () => { image, command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'python -m pip install -r requirements.txt -t 
/asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); From b00b44efd6e402744725e711906b456a28cebc5b Mon Sep 17 00:00:00 2001 From: Otavio Macedo Date: Fri, 7 Jan 2022 12:34:22 +0000 Subject: [PATCH 015/374] fix(lambda-python): asset files are generated inside the 'asset-input' folder (#18306) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes the asset structure from: ``` ├── asset.993168cfa75b295eeea55bf603340284b9be46ebc079e4965f2c16f5470efda5 │ └── asset-input │ ├── __init__.py │ └── app.py ``` to: ``` ├── asset.993168cfa75b295eeea55bf603340284b9be46ebc079e4965f2c16f5470efda5 │ ├── __init__.py │ └── app.py ``` Fixes https://github.com/aws/aws-cdk/issues/18301. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-lambda-python/lib/bundling.ts | 4 ++-- .../aws-lambda-python/test/bundling.test.ts | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts b/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts index 37e52ac13435b..34dd54e3eee98 100644 --- a/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts +++ b/packages/@aws-cdk/aws-lambda-python/lib/bundling.ts @@ -86,8 +86,8 @@ export class Bundling implements CdkBundlingOptions { bundlingCommands.push(packaging.exportCommand ?? ''); if (packaging.dependenciesFile) { bundlingCommands.push(`python -m pip install -r ${DependenciesFile.PIP} -t ${options.outputDir}`); - }; - bundlingCommands.push(`cp -R ${options.inputDir} ${options.outputDir}`); + } + bundlingCommands.push(`cp -R ${options.inputDir}/ ${options.outputDir}`); return bundlingCommands; } } diff --git a/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts b/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts index 5e4cf194f5151..e5f91b2ea09b8 100644 --- a/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts +++ b/packages/@aws-cdk/aws-lambda-python/test/bundling.test.ts @@ -36,7 +36,7 @@ test('Bundling a function without dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'cp -R /asset-input /asset-output', + 'cp -R /asset-input/ /asset-output', ], }), })); @@ -62,7 +62,7 @@ test('Bundling a function with requirements.txt', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input /asset-output', + 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input/ /asset-output', ], }), })); @@ -81,7 +81,7 @@ test('Bundling Python 2.7 with requirements.txt installed', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input /asset-output', + 'python -m pip install -r requirements.txt -t /asset-output && cp -R /asset-input/ /asset-output', ], }), })); @@ -101,7 +101,7 @@ test('Bundling a layer with dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -121,7 +121,7 @@ test('Bundling a python code layer', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'cp -R /asset-input /asset-output/python', + 'cp 
-R /asset-input/ /asset-output/python', ], }), })); @@ -141,7 +141,7 @@ test('Bundling a function with pipenv dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'PIPENV_VENV_IN_PROJECT=1 pipenv lock -r > requirements.txt && rm -rf .venv && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'PIPENV_VENV_IN_PROJECT=1 pipenv lock -r > requirements.txt && rm -rf .venv && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -161,7 +161,7 @@ test('Bundling a function with poetry dependencies', () => { bundling: expect.objectContaining({ command: [ 'bash', '-c', - 'poetry export --with-credentials --format requirements.txt --output requirements.txt && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'poetry export --with-credentials --format requirements.txt --output requirements.txt && python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); @@ -184,7 +184,7 @@ test('Bundling a function with custom bundling image', () => { image, command: [ 'bash', '-c', - 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input /asset-output/python', + 'python -m pip install -r requirements.txt -t /asset-output/python && cp -R /asset-input/ /asset-output/python', ], }), })); From 10b1c70fed38271b3ced3bdc822604b684cd7cf4 Mon Sep 17 00:00:00 2001 From: Otavio Macedo Date: Fri, 7 Jan 2022 12:41:50 +0000 Subject: [PATCH 016/374] chore(release): 1.138.1 --- CHANGELOG.md | 7 +++++++ version.v1.json | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fe9975283c33..5f988efd2449b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +### [1.138.1](https://github.com/aws/aws-cdk/compare/v1.138.0...v1.138.1) (2022-01-07) + + +### Bug Fixes + +* **lambda-python:** asset files are generated inside the 'asset-input' folder ([#18306](https://github.com/aws/aws-cdk/issues/18306)) ([b00b44e](https://github.com/aws/aws-cdk/commit/b00b44efd6e402744725e711906b456a28cebc5b)) + ## [1.138.0](https://github.com/aws/aws-cdk/compare/v1.137.0...v1.138.0) (2022-01-04) diff --git a/version.v1.json b/version.v1.json index 7dc0a8b892bd8..35cef8a7a0a74 100644 --- a/version.v1.json +++ b/version.v1.json @@ -1,3 +1,3 @@ { - "version": "1.138.0" + "version": "1.138.1" } \ No newline at end of file From 3839bce4f395b939f99630d1203aa10db940c50e Mon Sep 17 00:00:00 2001 From: Otavio Macedo Date: Fri, 7 Jan 2022 12:43:27 +0000 Subject: [PATCH 017/374] chore: release 1.138.1 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f988efd2449b..7a7177fff221f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
-### [1.138.1](https://github.com/aws/aws-cdk/compare/v1.138.0...v1.138.1) (2022-01-07) +## [1.138.1](https://github.com/aws/aws-cdk/compare/v1.138.0...v1.138.1) (2022-01-07) ### Bug Fixes From 5ddaef491d7962616f75f170cf7547cd9229338f Mon Sep 17 00:00:00 2001 From: zehsor Date: Fri, 7 Jan 2022 23:10:51 +0100 Subject: [PATCH 018/374] fix(aws-lambda-event-sources): unsupported properties for SelfManagedKafkaEventSource and ManagedKafkaEventSource (#17965) This PR fixes a bug in the CDK where some `kafkaEventSource` properties are actually unsupported. These properties exist only for kinesis and dynamodb streams. The existing KafkaEventSourceProps Interface erroneously extends an interface that includes kinesis and dynamodb specific properties. This PR separates these properties into a `Base` interface with shared stream properties for all 3, as well as an interface for `kinesis` and `dynamodb` specific properties. Unit testing unavailable because the scope of the PR is to remove properties. It is enough to ensure that current tests still succeed. We are allowing the breaking changes specified in `allowed-breaking-changes.txt` because they never worked in the first place. Fixes #17934. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- allowed-breaking-changes.txt | 23 +++++++ .../aws-lambda-event-sources/lib/kafka.ts | 4 +- .../aws-lambda-event-sources/lib/stream.ts | 62 ++++++++++--------- 3 files changed, 59 insertions(+), 30 deletions(-) diff --git a/allowed-breaking-changes.txt b/allowed-breaking-changes.txt index fa3498335f679..56b1145d61b39 100644 --- a/allowed-breaking-changes.txt +++ b/allowed-breaking-changes.txt @@ -93,3 +93,26 @@ incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.FunctionHook.bind incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.QueueHook.bind incompatible-argument:@aws-cdk/aws-autoscaling-hooktargets.TopicHook.bind incompatible-argument:@aws-cdk/aws-autoscaling.ILifecycleHookTarget.bind + +# removed properties from kafka eventsources as they are not supported +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.reportBatchItemFailures +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps.tumblingWindow +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.reportBatchItemFailures +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.tumblingWindow +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.bisectBatchOnError +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.maxRecordAge +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.parallelizationFactor +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.reportBatchItemFailures 
+removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.retryAttempts +removed:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps.tumblingWindow +base-types:@aws-cdk/aws-lambda-event-sources.KafkaEventSourceProps +base-types:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps +base-types:@aws-cdk/aws-lambda-event-sources.SelfManagedKafkaEventSourceProps \ No newline at end of file diff --git a/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts b/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts index e31fac89100cb..1918ec2756c84 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts +++ b/packages/@aws-cdk/aws-lambda-event-sources/lib/kafka.ts @@ -4,7 +4,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as lambda from '@aws-cdk/aws-lambda'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import { Stack, Names } from '@aws-cdk/core'; -import { StreamEventSource, StreamEventSourceProps } from './stream'; +import { StreamEventSource, BaseStreamEventSourceProps } from './stream'; // keep this import separate from other imports to reduce chance for merge conflicts with v2-main // eslint-disable-next-line no-duplicate-imports, import/order @@ -13,7 +13,7 @@ import { Construct } from '@aws-cdk/core'; /** * Properties for a Kafka event source */ -export interface KafkaEventSourceProps extends StreamEventSourceProps { +export interface KafkaEventSourceProps extends BaseStreamEventSourceProps{ /** * The Kafka topic to subscribe to */ diff --git a/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts b/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts index 01288efb21a6c..462387397b629 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts +++ b/packages/@aws-cdk/aws-lambda-event-sources/lib/stream.ts @@ -5,7 +5,7 @@ import { Duration } from '@aws-cdk/core'; * The set of properties for event sources that follow the streaming model, * such as, Dynamo, Kinesis and Kafka. */ -export interface StreamEventSourceProps { +export interface BaseStreamEventSourceProps{ /** * The largest number of records that AWS Lambda will retrieve from your event * source at the time of invoking your function. Your function receives an @@ -15,25 +15,51 @@ export interface StreamEventSourceProps { * * Minimum value of 1 * * Maximum value of: * * 1000 for {@link DynamoEventSource} - * * 10000 for {@link KinesisEventSource} + * * 10000 for {@link KinesisEventSource}, {@link ManagedKafkaEventSource} and {@link SelfManagedKafkaEventSource} * * @default 100 */ readonly batchSize?: number; /** - * If the function returns an error, split the batch in two and retry. + * An Amazon SQS queue or Amazon SNS topic destination for discarded records. * - * @default false + * @default discarded records are ignored */ - readonly bisectBatchOnError?: boolean; + readonly onFailure?: lambda.IEventSourceDlq; /** - * An Amazon SQS queue or Amazon SNS topic destination for discarded records. + * Where to begin consuming the stream. + */ + readonly startingPosition: lambda.StartingPosition; + + /** + * The maximum amount of time to gather records before invoking the function. + * Maximum of Duration.minutes(5) * - * @default discarded records are ignored + * @default Duration.seconds(0) */ - readonly onFailure?: lambda.IEventSourceDlq; + readonly maxBatchingWindow?: Duration; + + /** + * If the stream event source mapping should be enabled. 
+   *
+   * @default true
+   */
+  readonly enabled?: boolean;
+}
+
+/**
+ * The set of properties for event sources that follow the streaming model,
+ * such as Dynamo and Kinesis.
+ */
+export interface StreamEventSourceProps extends BaseStreamEventSourceProps {
+  /**
+   * If the function returns an error, split the batch in two and retry.
+   *
+   * @default false
+   */
+  readonly bisectBatchOnError?: boolean;
 
   /**
    * The maximum age of a record that Lambda sends to a function for processing.
@@ -65,11 +91,6 @@ export interface StreamEventSourceProps {
    */
   readonly parallelizationFactor?: number;
 
-  /**
-   * Where to begin consuming the stream.
-   */
-  readonly startingPosition: lambda.StartingPosition;
-
   /**
    * Allow functions to return partially successful responses for a batch of records.
    *
@@ -79,14 +100,6 @@ export interface StreamEventSourceProps {
    */
   readonly reportBatchItemFailures?: boolean;
 
-  /**
-   * The maximum amount of time to gather records before invoking the function.
-   * Maximum of Duration.minutes(5)
-   *
-   * @default Duration.seconds(0)
-   */
-  readonly maxBatchingWindow?: Duration;
-
   /**
    * The size of the tumbling windows to group records sent to DynamoDB or Kinesis
    * Valid Range: 0 - 15 minutes
@@ -94,13 +107,6 @@ export interface StreamEventSourceProps {
    * @default - None
    */
   readonly tumblingWindow?: Duration;
-
-  /**
-   * If the stream event source mapping should be enabled.
-   *
-   * @default true
-   */
-  readonly enabled?: boolean;
 }
 
 /**

From 74eee1e5b8fa404dde129f001b986d615f435c73 Mon Sep 17 00:00:00 2001
From: Kaizen Conroy <36202692+kaizen3031593@users.noreply.github.com>
Date: Fri, 7 Jan 2022 17:55:35 -0500
Subject: [PATCH 019/374] fix(apigatewayv2-authorizers): incorrect `identitySource` default for `WebSocketLambdaAuthorizer` (#18315)

We introduced `WebSocketLambdaAuthorizer` in #16886 with an incorrect default `identitySource`, according to these [docs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-authorizer.html#cfn-apigatewayv2-authorizer-identitysource). As a result, using the default `identitySource` causes a deploy-time failure. This PR fixes the error and adds documentation for the syntax of all `identitySource` possibilities. I can confirm that this default successfully passes `cdk deploy` on my local app.

Fixes #18307.

BREAKING CHANGE: `WebSocketLambdaAuthorizerProps.identitySource` default changes from `['$request.header.Authorization']` to `['route.request.header.Authorization']`.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 .../aws-apigatewayv2-authorizers/lib/websocket/lambda.ts | 8 ++++++--
 .../test/websocket/lambda.test.ts                        | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts b/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts
index 2e60cbdd7b547..8b5b5c6d3fc43 100644
--- a/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts
+++ b/packages/@aws-cdk/aws-apigatewayv2-authorizers/lib/websocket/lambda.ts
@@ -28,7 +28,11 @@ export interface WebSocketLambdaAuthorizerProps {
   /**
    * The identity source for which authorization is requested.
    *
-   * @default ['$request.header.Authorization']
+   * Request parameter match `'route.request.querystring|header.[a-zA-Z0-9._-]+'`.
+   * Stage variable match `'stageVariables.[a-zA-Z0-9._-]+'`.
+   * Context parameter match `'context.[a-zA-Z0-9._-]+'`.
+ * + * @default ['route.request.header.Authorization'] */ readonly identitySource?: string[]; } @@ -56,7 +60,7 @@ export class WebSocketLambdaAuthorizer implements IWebSocketRouteAuthorizer { this.authorizer = new WebSocketAuthorizer(options.scope, this.id, { webSocketApi: options.route.webSocketApi, identitySource: this.props.identitySource ?? [ - '$request.header.Authorization', + 'route.request.header.Authorization', ], type: WebSocketAuthorizerType.LAMBDA, authorizerName: this.props.authorizerName ?? this.id, diff --git a/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts b/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts index c171247801911..8a62d5731ac58 100644 --- a/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts +++ b/packages/@aws-cdk/aws-apigatewayv2-authorizers/test/websocket/lambda.test.ts @@ -35,7 +35,7 @@ describe('WebSocketLambdaAuthorizer', () => { Name: 'default-authorizer', AuthorizerType: 'REQUEST', IdentitySource: [ - '$request.header.Authorization', + 'route.request.header.Authorization', ], }); From 455147a5c3ed2c519279d6a523881126e0856f21 Mon Sep 17 00:00:00 2001 From: Addi Horowitz <11254819+addihorowitz@users.noreply.github.com> Date: Sun, 9 Jan 2022 17:43:11 +0200 Subject: [PATCH 020/374] docs: Add urls for blog posts (#17912) Best practices + all developer blog posts ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 4a1eab8dd9eb8..7349b1909d6b1 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,12 @@ this capability, please see the ## More Resources * [CDK Workshop](https://cdkworkshop.com/) * [Construct Hub](https://constructs.dev) - Find and use open-source Cloud Development Kit (CDK) libraries +* Best Practices + * [Best practices for developing cloud applications with AWS CDK](https://aws.amazon.com/blogs/devops/best-practices-for-developing-cloud-applications-with-aws-cdk/) + * [Align with best practices while creating infrastructure using cdk aspects](https://aws.amazon.com/blogs/devops/align-with-best-practices-while-creating-infrastructure-using-cdk-aspects/) + * [Recommended AWS CDK project structure for Python applications](https://aws.amazon.com/blogs/developer/recommended-aws-cdk-project-structure-for-python-applications/) + * [Best practices for discoverability of a construct library on Construct Hub](https://aws.amazon.com/blogs/opensource/best-practices-for-discoverability-of-a-construct-library-on-construct-hub/) +* [All developer blog posts about AWS CDK](https://aws.amazon.com/blogs/developer/category/developer-tools/aws-cloud-development-kit/) * **[CDK Construction Zone](https://www.twitch.tv/collections/9kCOGphNZBYVdA)** - A Twitch live coding series hosted by the CDK team, season one episodes: * Triggers: Join us as we implement [Triggers](https://github.com/aws/aws-cdk-rfcs/issues/71), a Construct for configuring deploy time actions. 
Episodes 1-3: * [S1E1](https://www.twitch.tv/videos/917691798): Triggers (part 1); **Participants:** @NetaNir, @eladb, @richardhboyd From ddc2bc6ae64fe14ddb4a03122c90dfcf954f149f Mon Sep 17 00:00:00 2001 From: Shea Belsky Date: Sun, 9 Jan 2022 11:31:43 -0500 Subject: [PATCH 021/374] fix(cli): breaks due to faulty version of `colors` (#18324) Fixes https://github.com/aws/aws-cdk/issues/18322 and https://github.com/aws/aws-cdk/issues/18323 Clarifying question: I'm not sure if the `yarn.lock` file should have automatically updated itself to only refer to `colors@1.4.0` when I set the dependency within the workspace _and_ added the resolution for child dependencies. If it's expected behavior for it _not_ to update the `yarn.lock` file after adding `resolutions`, great! If I need to do something else for that to happen, let me know! ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- package.json | 3 ++- packages/@aws-cdk/aws-cloudtrail/package.json | 4 ++-- packages/@aws-cdk/cloudformation-diff/package.json | 4 ++-- packages/aws-cdk/package.json | 4 ++-- packages/awslint/package.json | 4 ++-- tools/@aws-cdk/cdk-build-tools/package.json | 4 ++-- tools/@aws-cdk/pkglint/package.json | 9 ++++++--- yarn.lock | 2 +- 8 files changed, 19 insertions(+), 15 deletions(-) diff --git a/package.json b/package.json index 462d2c605eabd..78578c6cc9811 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,7 @@ "typescript": "~3.9.10" }, "resolutions": { + "colors": "1.4.0", "string-width": "^4.2.3" }, "repository": { @@ -179,4 +180,4 @@ "dependencies": { "string-width": "^4.2.3" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-cloudtrail/package.json b/packages/@aws-cdk/aws-cloudtrail/package.json index d94555d73ae26..3a81482d9fd44 100644 --- a/packages/@aws-cdk/aws-cloudtrail/package.json +++ b/packages/@aws-cdk/aws-cloudtrail/package.json @@ -79,7 +79,7 @@ "@aws-cdk/pkglint": "0.0.0", "@types/jest": "^27.0.3", "aws-sdk": "^2.848.0", - "colors": "^1.4.0", + "colors": "1.4.0", "jest": "^27.4.5" }, "dependencies": { @@ -121,4 +121,4 @@ "publishConfig": { "tag": "latest" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/cloudformation-diff/package.json b/packages/@aws-cdk/cloudformation-diff/package.json index 53bea429a4b8c..2a7452c11e399 100644 --- a/packages/@aws-cdk/cloudformation-diff/package.json +++ b/packages/@aws-cdk/cloudformation-diff/package.json @@ -25,7 +25,7 @@ "dependencies": { "@aws-cdk/cfnspec": "0.0.0", "@types/node": "^10.17.60", - "colors": "^1.4.0", + "colors": "1.4.0", "diff": "^5.0.0", "fast-deep-equal": "^3.1.3", "string-width": "^4.2.3", @@ -58,4 +58,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/aws-cdk/package.json b/packages/aws-cdk/package.json index 382a3de061a6a..e548614c522fb 100644 --- a/packages/aws-cdk/package.json +++ b/packages/aws-cdk/package.json @@ -77,7 +77,7 @@ "camelcase": "^6.2.1", "cdk-assets": "0.0.0", "chokidar": "^3.5.2", - "colors": "^1.4.0", + "colors": "1.4.0", "decamelize": "^5.0.1", "fs-extra": "^9.1.0", "glob": "^7.2.0", @@ -124,4 +124,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/awslint/package.json b/packages/awslint/package.json index f158b63476d76..b8fb72ad10f29 100644 --- a/packages/awslint/package.json +++ b/packages/awslint/package.json @@ -20,7 +20,7 @@ "dependencies": { "@jsii/spec": "^1.50.0", "camelcase": "^6.2.1", - "colors": 
"^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "jsii-reflect": "^1.50.0", "yargs": "^16.2.0" @@ -71,4 +71,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/tools/@aws-cdk/cdk-build-tools/package.json b/tools/@aws-cdk/cdk-build-tools/package.json index 8db499fc9e8d2..7aad17fcd9e80 100644 --- a/tools/@aws-cdk/cdk-build-tools/package.json +++ b/tools/@aws-cdk/cdk-build-tools/package.json @@ -47,7 +47,7 @@ "@typescript-eslint/eslint-plugin": "^4.33.0", "@typescript-eslint/parser": "^4.33.0", "awslint": "0.0.0", - "colors": "^1.4.0", + "colors": "1.4.0", "eslint": "^7.32.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^2.5.0", @@ -82,4 +82,4 @@ "ubergen": { "exclude": true } -} +} \ No newline at end of file diff --git a/tools/@aws-cdk/pkglint/package.json b/tools/@aws-cdk/pkglint/package.json index 5ab8ec96fb7d5..3115ccc8023a0 100644 --- a/tools/@aws-cdk/pkglint/package.json +++ b/tools/@aws-cdk/pkglint/package.json @@ -54,15 +54,18 @@ "typescript": "~3.9.10" }, "nozem": { - "ostools": ["chmod", "cp"] + "ostools": [ + "chmod", + "cp" + ] }, "dependencies": { "case": "^1.6.3", - "colors": "^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "glob": "^7.2.0", "npm-bundled": "^1.1.2", "semver": "^7.3.5", "yargs": "^16.2.0" } -} +} \ No newline at end of file diff --git a/yarn.lock b/yarn.lock index e478f529f0427..8e58662689437 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2835,7 +2835,7 @@ color-name@~1.1.4: resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -colors@^1.4.0: +colors@1.4.0, colors@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== From 43bf9aec0b3c5e06d5382b29f4e8e0c91cd796ca Mon Sep 17 00:00:00 2001 From: Shea Belsky Date: Sun, 9 Jan 2022 11:31:43 -0500 Subject: [PATCH 022/374] fix(cli): breaks due to faulty version of `colors` (#18324) Fixes https://github.com/aws/aws-cdk/issues/18322 and https://github.com/aws/aws-cdk/issues/18323 Clarifying question: I'm not sure if the `yarn.lock` file should have automatically updated itself to only refer to `colors@1.4.0` when I set the dependency within the workspace _and_ added the resolution for child dependencies. If it's expected behavior for it _not_ to update the `yarn.lock` file after adding `resolutions`, great! If I need to do something else for that to happen, let me know! 
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- package.json | 3 ++- packages/@aws-cdk/aws-cloudtrail/package.json | 4 ++-- packages/@aws-cdk/cloudformation-diff/package.json | 4 ++-- packages/aws-cdk/package.json | 4 ++-- packages/awslint/package.json | 4 ++-- tools/@aws-cdk/cdk-build-tools/package.json | 4 ++-- tools/@aws-cdk/pkglint/package.json | 9 ++++++--- yarn.lock | 2 +- 8 files changed, 19 insertions(+), 15 deletions(-) diff --git a/package.json b/package.json index 462d2c605eabd..78578c6cc9811 100644 --- a/package.json +++ b/package.json @@ -31,6 +31,7 @@ "typescript": "~3.9.10" }, "resolutions": { + "colors": "1.4.0", "string-width": "^4.2.3" }, "repository": { @@ -179,4 +180,4 @@ "dependencies": { "string-width": "^4.2.3" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-cloudtrail/package.json b/packages/@aws-cdk/aws-cloudtrail/package.json index d94555d73ae26..3a81482d9fd44 100644 --- a/packages/@aws-cdk/aws-cloudtrail/package.json +++ b/packages/@aws-cdk/aws-cloudtrail/package.json @@ -79,7 +79,7 @@ "@aws-cdk/pkglint": "0.0.0", "@types/jest": "^27.0.3", "aws-sdk": "^2.848.0", - "colors": "^1.4.0", + "colors": "1.4.0", "jest": "^27.4.5" }, "dependencies": { @@ -121,4 +121,4 @@ "publishConfig": { "tag": "latest" } -} +} \ No newline at end of file diff --git a/packages/@aws-cdk/cloudformation-diff/package.json b/packages/@aws-cdk/cloudformation-diff/package.json index 53bea429a4b8c..2a7452c11e399 100644 --- a/packages/@aws-cdk/cloudformation-diff/package.json +++ b/packages/@aws-cdk/cloudformation-diff/package.json @@ -25,7 +25,7 @@ "dependencies": { "@aws-cdk/cfnspec": "0.0.0", "@types/node": "^10.17.60", - "colors": "^1.4.0", + "colors": "1.4.0", "diff": "^5.0.0", "fast-deep-equal": "^3.1.3", "string-width": "^4.2.3", @@ -58,4 +58,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/aws-cdk/package.json b/packages/aws-cdk/package.json index 382a3de061a6a..e548614c522fb 100644 --- a/packages/aws-cdk/package.json +++ b/packages/aws-cdk/package.json @@ -77,7 +77,7 @@ "camelcase": "^6.2.1", "cdk-assets": "0.0.0", "chokidar": "^3.5.2", - "colors": "^1.4.0", + "colors": "1.4.0", "decamelize": "^5.0.1", "fs-extra": "^9.1.0", "glob": "^7.2.0", @@ -124,4 +124,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/packages/awslint/package.json b/packages/awslint/package.json index f158b63476d76..b8fb72ad10f29 100644 --- a/packages/awslint/package.json +++ b/packages/awslint/package.json @@ -20,7 +20,7 @@ "dependencies": { "@jsii/spec": "^1.50.0", "camelcase": "^6.2.1", - "colors": "^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "jsii-reflect": "^1.50.0", "yargs": "^16.2.0" @@ -71,4 +71,4 @@ "publishConfig": { "tag": "latest-1" } -} +} \ No newline at end of file diff --git a/tools/@aws-cdk/cdk-build-tools/package.json b/tools/@aws-cdk/cdk-build-tools/package.json index 8db499fc9e8d2..7aad17fcd9e80 100644 --- a/tools/@aws-cdk/cdk-build-tools/package.json +++ b/tools/@aws-cdk/cdk-build-tools/package.json @@ -47,7 +47,7 @@ "@typescript-eslint/eslint-plugin": "^4.33.0", "@typescript-eslint/parser": "^4.33.0", "awslint": "0.0.0", - "colors": "^1.4.0", + "colors": "1.4.0", "eslint": "^7.32.0", "eslint-import-resolver-node": "^0.3.6", "eslint-import-resolver-typescript": "^2.5.0", @@ -82,4 +82,4 @@ "ubergen": { "exclude": true } -} +} \ No newline at end of file diff --git 
a/tools/@aws-cdk/pkglint/package.json b/tools/@aws-cdk/pkglint/package.json index 5ab8ec96fb7d5..3115ccc8023a0 100644 --- a/tools/@aws-cdk/pkglint/package.json +++ b/tools/@aws-cdk/pkglint/package.json @@ -54,15 +54,18 @@ "typescript": "~3.9.10" }, "nozem": { - "ostools": ["chmod", "cp"] + "ostools": [ + "chmod", + "cp" + ] }, "dependencies": { "case": "^1.6.3", - "colors": "^1.4.0", + "colors": "1.4.0", "fs-extra": "^9.1.0", "glob": "^7.2.0", "npm-bundled": "^1.1.2", "semver": "^7.3.5", "yargs": "^16.2.0" } -} +} \ No newline at end of file diff --git a/yarn.lock b/yarn.lock index e478f529f0427..8e58662689437 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2835,7 +2835,7 @@ color-name@~1.1.4: resolved "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -colors@^1.4.0: +colors@1.4.0, colors@^1.4.0: version "1.4.0" resolved "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== From b012846e4c7dd2c08ffa39e3adeaf4c6df09c8be Mon Sep 17 00:00:00 2001 From: epolon Date: Sun, 9 Jan 2022 18:57:12 +0200 Subject: [PATCH 023/374] chore(release): 1.138.2 --- CHANGELOG.md | 7 +++++++ version.v1.json | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a7177fff221f..f40604e3c028a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +### [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) + + +### Bug Fixes + +* **cli:** breaks due to faulty version of `colors` ([#18324](https://github.com/aws/aws-cdk/issues/18324)) ([43bf9ae](https://github.com/aws/aws-cdk/commit/43bf9aec0b3c5e06d5382b29f4e8e0c91cd796ca)) + ## [1.138.1](https://github.com/aws/aws-cdk/compare/v1.138.0...v1.138.1) (2022-01-07) diff --git a/version.v1.json b/version.v1.json index 35cef8a7a0a74..52f052970432b 100644 --- a/version.v1.json +++ b/version.v1.json @@ -1,3 +1,3 @@ { - "version": "1.138.1" + "version": "1.138.2" } \ No newline at end of file From ac7b66fc40df4fbd5f57f6fa6ebcea2b7a305806 Mon Sep 17 00:00:00 2001 From: Eli Polonsky Date: Sun, 9 Jan 2022 19:39:47 +0200 Subject: [PATCH 024/374] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f40604e3c028a..4e1b13e5cd6bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
-### [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) +## [1.138.2](https://github.com/aws/aws-cdk/compare/v1.138.1...v1.138.2) (2022-01-09) ### Bug Fixes From c2c87d9dd861a25dcbd9aa830e81ecb4d76ba509 Mon Sep 17 00:00:00 2001 From: Kaizen Conroy <36202692+kaizen3031593@users.noreply.github.com> Date: Mon, 10 Jan 2022 04:21:32 -0500 Subject: [PATCH 025/374] fix(pipelines): `DockerCredential.dockerHub()` silently fails auth (#18313) ### Problem: `DockerCredential.dockerHub()` silently failed to authenticate users, resulting in unexpected and intermittent throttling due to docker's policy for unauthenticated users. ### Reason: `.dockerHub()` added `index.docker.io` to the domain credentials, but the actual docker command [authenticated](https://github.com/moby/moby/blob/1e71c6cffedb79e3def696652753ea43cdc47b99/registry/config.go#L35) with `https://index.docker.io/v1/` which it was unable to find as a domain credential, thus failing to trigger `docker-credential-cdk-assets` during the `docker --config build` call. Furthermore, the credential `DockerCredential.customRegistry('https://index.docker.io/v1/', secret)` alone does not work. This would successfully trigger `docker-credential-cdk-assets` but fail to authenticate because of how `cdk-assets` handles credential lookup. The command strips the endpoint into just a hostname so in this case we try `fetchDockerLoginCredentials(awsClient, config, 'index.docker.io')` which fails: https://github.com/aws/aws-cdk/blob/4fb0309e3b93be276ab3e2d510ffc2ce35823dcd/packages/cdk-assets/bin/docker-credential-cdk-assets.ts#L32-L38 So the workaround for this bug was to specify both domains as credentials, each to satisfy a separate step of the process: ```ts dockerCredentials: [ pipelines.DockerCredential.dockerHub(secret), pipelines.DockerCredential.customRegistry('https://index.docker.io/v1/', secret), ], ``` ### Solution: This PR introduces two separate changes to address both problems. First, we change the hardcoded domain in `DockerCredential.dockerHub()` to be `https://index.docker.io/v1/`. This allows us to successfully trigger `docker-credential-cdk-assets` when the `docker --config build` command is called. Next, to make sure the credential lookup succeeds, we check for both the complete endpoint and the domain name. In this case, we will check for both `https://index.docker.io/v1/` as well as `index.docker.io`. Since `https://index.docker.io/v1/` exists in the credentials helper, authentication will succeed. Why do we still check for the domain `index.docker.io`? I don't know how custom registries or ecr works in this context and believe it to be beyond the scope of the PR. It's possible that they require the domain only for lookup. ### Testing: The change to credential lookups is unit tested in `docker-credentials.test.ts`. I confirmed that the change to `DockerCredential.dockerHub()` is successful by configuring a mock `cdk-docker-creds.json` file and successfully `cdk deploy`ing a docker image that depends on a private repository. This isn't a common use case but ensures that failure to authenticate results in failure every time. Thanks @james-mathiesen for the suggestion. ### Contributors: Thanks to @nohack for the code in `cdk-assets`. Fixes #15737. 
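With this change, the two-entry workaround shown above should no longer be necessary; a single entry is expected to both trigger the credential helper during the `docker --config build` call and satisfy the credential lookup (a minimal sketch of the resulting usage, assuming the same `secret` as in the workaround snippet):

```ts
dockerCredentials: [
  // Registers the credential under 'https://index.docker.io/v1/', the endpoint
  // Docker actually authenticates against; the lookup now also accepts the full
  // endpoint in addition to the bare hostname.
  pipelines.DockerCredential.dockerHub(secret),
],
```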
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/pipelines/lib/docker-credentials.ts | 4 ++-- .../pipelines/test/docker-credentials.test.ts | 2 +- .../cdk-assets/bin/docker-credential-cdk-assets.ts | 10 ++-------- .../cdk-assets/lib/private/docker-credentials.ts | 11 ++++++++--- packages/cdk-assets/lib/private/docker.ts | 11 ++++++++++- .../test/private/docker-credentials.test.ts | 14 +++++++++++++- 6 files changed, 36 insertions(+), 16 deletions(-) diff --git a/packages/@aws-cdk/pipelines/lib/docker-credentials.ts b/packages/@aws-cdk/pipelines/lib/docker-credentials.ts index 77b7d2c1b4381..05144d4957771 100644 --- a/packages/@aws-cdk/pipelines/lib/docker-credentials.ts +++ b/packages/@aws-cdk/pipelines/lib/docker-credentials.ts @@ -10,10 +10,10 @@ import { Fn } from '@aws-cdk/core'; export abstract class DockerCredential { /** * Creates a DockerCredential for DockerHub. - * Convenience method for `fromCustomRegistry('index.docker.io', opts)`. + * Convenience method for `customRegistry('https://index.docker.io/v1/', opts)`. */ public static dockerHub(secret: secretsmanager.ISecret, opts: ExternalDockerCredentialOptions = {}): DockerCredential { - return new ExternalDockerCredential('index.docker.io', secret, opts); + return new ExternalDockerCredential('https://index.docker.io/v1/', secret, opts); } /** diff --git a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts index a2b5fc2c577dd..902c13a4129b7 100644 --- a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts +++ b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts @@ -29,7 +29,7 @@ describe('ExternalDockerCredential', () => { test('dockerHub defaults registry domain', () => { const creds = cdkp.DockerCredential.dockerHub(secret); - expect(Object.keys(creds._renderCdkAssetsConfig())).toEqual(['index.docker.io']); + expect(Object.keys(creds._renderCdkAssetsConfig())).toEqual(['https://index.docker.io/v1/']); }); test('minimal example only renders secret', () => { diff --git a/packages/cdk-assets/bin/docker-credential-cdk-assets.ts b/packages/cdk-assets/bin/docker-credential-cdk-assets.ts index b04f2ba8510bc..6dccb5521cf55 100644 --- a/packages/cdk-assets/bin/docker-credential-cdk-assets.ts +++ b/packages/cdk-assets/bin/docker-credential-cdk-assets.ts @@ -29,14 +29,8 @@ async function main() { } // Read the domain to fetch from stdin - let rawDomain = fs.readFileSync(0, { encoding: 'utf-8' }).trim(); - // Paranoid handling to ensure new URL() doesn't throw if the schema is missing. - // Not convinced docker will ever pass in a url like 'index.docker.io/v1', but just in case... - rawDomain = rawDomain.includes('://') ? 
rawDomain : `https://${rawDomain}`; - const domain = new URL(rawDomain).hostname; - - const credentials = await fetchDockerLoginCredentials(new DefaultAwsClient(), config, domain); - + let endpoint = fs.readFileSync(0, { encoding: 'utf-8' }).trim(); + const credentials = await fetchDockerLoginCredentials(new DefaultAwsClient(), config, endpoint); // Write the credentials back to stdout fs.writeFileSync(1, JSON.stringify(credentials)); } diff --git a/packages/cdk-assets/lib/private/docker-credentials.ts b/packages/cdk-assets/lib/private/docker-credentials.ts index b5c3f42139581..923d18d70a3ee 100644 --- a/packages/cdk-assets/lib/private/docker-credentials.ts +++ b/packages/cdk-assets/lib/private/docker-credentials.ts @@ -39,12 +39,17 @@ export function cdkCredentialsConfig(): DockerCredentialsConfig | undefined { } /** Fetches login credentials from the configured source (e.g., SecretsManager, ECR) */ -export async function fetchDockerLoginCredentials(aws: IAws, config: DockerCredentialsConfig, domain: string) { - if (!Object.keys(config.domainCredentials).includes(domain)) { +export async function fetchDockerLoginCredentials(aws: IAws, config: DockerCredentialsConfig, endpoint: string) { + // Paranoid handling to ensure new URL() doesn't throw if the schema is missing + // For official docker registry, docker will pass https://index.docker.io/v1/ + endpoint = endpoint.includes('://') ? endpoint : `https://${endpoint}`; + const domain = new URL(endpoint).hostname; + + if (!Object.keys(config.domainCredentials).includes(domain) && !Object.keys(config.domainCredentials).includes(endpoint)) { throw new Error(`unknown domain ${domain}`); } - const domainConfig = config.domainCredentials[domain]; + let domainConfig = config.domainCredentials[domain] ?? config.domainCredentials[endpoint]; if (domainConfig.secretsManagerSecretId) { const sm = await aws.secretsManagerClient({ assumeRoleArn: domainConfig.assumeRoleArn }); diff --git a/packages/cdk-assets/lib/private/docker.ts b/packages/cdk-assets/lib/private/docker.ts index e1fc54429f18f..aed2631ab2852 100644 --- a/packages/cdk-assets/lib/private/docker.ts +++ b/packages/cdk-assets/lib/private/docker.ts @@ -124,8 +124,17 @@ export class Docker { private async execute(args: string[], options: ShellOptions = {}) { const configArgs = this.configDir ? ['--config', this.configDir] : []; + const pathToCdkAssets = path.resolve(__dirname, '..', '..', 'bin'); try { - await shell(['docker', ...configArgs, ...args], { logger: this.logger, ...options }); + await shell(['docker', ...configArgs, ...args], { + logger: this.logger, + ...options, + env: { + ...process.env, + ...options.env, + PATH: `${pathToCdkAssets}${path.delimiter}${options.env?.PATH ?? process.env.PATH}`, + }, + }); } catch (e) { if (e.code === 'ENOENT') { throw new Error('Unable to execute \'docker\' in order to build a container asset. 
diff --git a/packages/cdk-assets/test/private/docker-credentials.test.ts b/packages/cdk-assets/test/private/docker-credentials.test.ts
index 6b521c67457b6..19160ccd0c880 100644
--- a/packages/cdk-assets/test/private/docker-credentials.test.ts
+++ b/packages/cdk-assets/test/private/docker-credentials.test.ts
@@ -97,8 +97,12 @@ describe('fetchDockerLoginCredentials', () => {
     await expect(fetchDockerLoginCredentials(aws, config, 'misconfigured.example.com')).rejects.toThrow(/unknown credential type/);
   });
 
+  test('does not throw on correctly configured raw domain', async () => {
+    await expect(fetchDockerLoginCredentials(aws, config, 'https://secret.example.com/v1/')).resolves.toBeDefined();
+  });
+
   describe('SecretsManager', () => {
-    test('returns the credentials sucessfully if configured correctly', async () => {
+    test('returns the credentials successfully if configured correctly - domain', async () => {
       mockSecretWithSecretString({ username: 'secretUser', secret: 'secretPass' });
 
       const creds = await fetchDockerLoginCredentials(aws, config, 'secret.example.com');
@@ -106,6 +110,14 @@
       expect(creds).toEqual({ Username: 'secretUser', Secret: 'secretPass' });
     });
 
+    test('returns the credentials successfully if configured correctly - raw domain', async () => {
+      mockSecretWithSecretString({ username: 'secretUser', secret: 'secretPass' });
+
+      const creds = await fetchDockerLoginCredentials(aws, config, 'https://secret.example.com');
+
+      expect(creds).toEqual({ Username: 'secretUser', Secret: 'secretPass' });
+    });
+
     test('throws when SecretsManager returns an error', async () => {
       const errMessage = "Secrets Manager can't find the specified secret.";
       aws.mockSecretsManager.getSecretValue = mockedApiFailure('ResourceNotFoundException', errMessage);

From 883c1a32eb73cd7e4bd9d8e2cafe06a70e5264f7 Mon Sep 17 00:00:00 2001
From: AWS CDK Automation <43080478+aws-cdk-automation@users.noreply.github.com>
Date: Mon, 10 Jan 2022 02:33:15 -0800
Subject: [PATCH 026/374] docs(cfnspec): update CloudFormation documentation (#18338)

Co-authored-by: AWS CDK Team
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 .../spec-source/cfn-docs/cfn-docs.json | 270 ++++++++++++++++--
 1 file changed, 249 insertions(+), 21 deletions(-)

diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
index 8340dcdfab2e6..337ce790bae1d 100644
--- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
+++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json
@@ -902,6 +902,7 @@
     },
     "AWS::ApiGateway::Deployment": {
       "attributes": {
+        "DeploymentId": "",
         "Ref": "`Ref` returns the deployment ID, such as `123abc` ."
       },
       "description": "The `AWS::ApiGateway::Deployment` resource deploys an API Gateway `RestApi` resource to a stage so that clients can call the API over the internet. The stage acts as an environment.",
@@ -3933,6 +3933,17 @@
         "S3Key": "The S3 key of the S3 object."
} }, + "AWS::AppStream::ApplicationEntitlementAssociation": { + "attributes": { + "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns the combination of the `StackName` , `EntitlementName` , and `ApplicationIdentifier` , such as `abcdefStack|abcdefEntitlement|abcdefApplication` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." + }, + "description": "Associates an application to an entitlement.", + "properties": { + "ApplicationIdentifier": "The identifier of the application.", + "EntitlementName": "The name of the entitlement.", + "StackName": "The name of the stack." + } + }, "AWS::AppStream::ApplicationFleetAssociation": { "attributes": { "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns a combination of the `FleetName` and `ApplicationArn` , such as `aabcdefgFleet|arn:aws:appstream:us-west-2:123456789123:application/abcdefg` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." @@ -3960,6 +3972,29 @@ "AccountPassword": "The password for the account." } }, + "AWS::AppStream::Entitlement": { + "attributes": { + "CreatedTime": "The time when the entitlement was created.", + "LastModifiedTime": "The time when the entitlement was last modified.", + "Ref": "When you pass the logical ID of this resource to the intrinsic `Ref` function, `Ref` returns the combination of the `StackName` and `Name` , such as `abcdefStack|abcdefEntitlement` .\n\nFor more information about using the `Ref` function, see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) ." + }, + "description": "Creates an entitlement to control access, based on user attributes, to specific applications within a stack. Entitlements apply to SAML 2.0 federated user identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all applications in a stack. Entitlements don't apply to the desktop stream view application or to applications managed by a dynamic app provider using the Dynamic Application Framework.", + "properties": { + "AppVisibility": "Specifies whether to entitle all apps or only selected apps.", + "Attributes": "The attributes of the entitlement.", + "Description": "The description of the entitlement.", + "Name": "The name of the entitlement.", + "StackName": "The name of the stack." + } + }, + "AWS::AppStream::Entitlement.Attribute": { + "attributes": {}, + "description": "An attribute that belongs to an entitlement. Application entitlements work by matching a supported SAML 2.0 attribute name to a value when a user identity federates to an AppStream 2.0 SAML application.", + "properties": { + "Name": "A supported AWS IAM SAML PrincipalTag attribute that is matched to a value when a user identity federates to an AppStream 2.0 SAML application.\n\nThe following are supported values:\n\n- roles\n- department\n- organization\n- groups\n- title\n- costCenter\n- userType", + "Value": "A value that is matched to a supported SAML attribute name when a user identity federates to an AppStream 2.0 SAML application." + } + }, "AWS::AppStream::Fleet": { "attributes": {}, "description": "The `AWS::AppStream::Fleet` resource creates a fleet for Amazon AppStream 2.0. 
A fleet consists of streaming instances that run a specified image when using Always-On or On-Demand.", @@ -8831,10 +8866,28 @@ "properties": { "ExcludedAccounts": "A comma-separated list of accounts excluded from organization config rule.", "OrganizationConfigRuleName": "The name that you assign to organization config rule.", + "OrganizationCustomCodeRuleMetadata": "", "OrganizationCustomRuleMetadata": "An `OrganizationCustomRuleMetadata` object.", "OrganizationManagedRuleMetadata": "An `OrganizationManagedRuleMetadata` object." } }, + "AWS::Config::OrganizationConfigRule.OrganizationCustomCodeRuleMetadata": { + "attributes": {}, + "description": "", + "properties": { + "CodeText": "", + "DebugLogDeliveryAccounts": "", + "Description": "", + "InputParameters": "", + "MaximumExecutionFrequency": "", + "OrganizationConfigRuleTriggerTypes": "", + "ResourceIdScope": "", + "ResourceTypesScope": "", + "Runtime": "", + "TagKeyScope": "", + "TagValueScope": "" + } + }, "AWS::Config::OrganizationConfigRule.OrganizationCustomRuleMetadata": { "attributes": {}, "description": "An object that specifies organization custom rule metadata such as resource type, resource ID of AWS resource, Lambda function ARN, and organization trigger types that trigger AWS Config to evaluate your AWS resources against a rule. It also provides the frequency with which you want AWS Config to run evaluations for the rule if the trigger type is periodic.", @@ -11266,6 +11319,7 @@ "description": "Specifies a VPC flow log that captures IP traffic for a specified network interface, subnet, or VPC. To view the log data, use Amazon CloudWatch Logs (CloudWatch Logs) to help troubleshoot connection issues. For example, you can use a flow log to investigate why certain traffic isn't reaching an instance, which can help you diagnose overly restrictive security group rules. For more information, see [VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html) in the *Amazon VPC User Guide* .", "properties": { "DeliverLogsPermissionArn": "The ARN for the IAM role that permits Amazon EC2 to publish flow logs to a CloudWatch Logs log group in your account.\n\nIf you specify `LogDestinationType` as `s3` , do not specify `DeliverLogsPermissionArn` or `LogGroupName` .", + "DestinationOptions": "The destination options.", "LogDestination": "The destination to which the flow log data is to be published. Flow log data can be published to a CloudWatch Logs log group or an Amazon S3 bucket. The value specified for this parameter depends on the value specified for `LogDestinationType` .\n\nIf `LogDestinationType` is not specified or `cloud-watch-logs` , specify the Amazon Resource Name (ARN) of the CloudWatch Logs log group. For example, to publish to a log group called `my-logs` , specify `arn:aws:logs:us-east-1:123456789012:log-group:my-logs` . Alternatively, use `LogGroupName` instead.\n\nIf LogDestinationType is `s3` , specify the ARN of the Amazon S3 bucket. You can also specify a subfolder in the bucket. To specify a subfolder in the bucket, use the following ARN format: `bucket_ARN/subfolder_name/` . For example, to specify a subfolder named `my-logs` in a bucket named `my-bucket` , use the following ARN: `arn:aws:s3:::my-bucket/my-logs/` . You cannot use `AWSLogs` as a subfolder name. This is a reserved term.", "LogDestinationType": "The type of destination to which the flow log data is to be published. Flow log data can be published to CloudWatch Logs or Amazon S3. 
To publish flow log data to CloudWatch Logs, specify `cloud-watch-logs` . To publish flow log data to Amazon S3, specify `s3` .\n\nIf you specify `LogDestinationType` as `s3` , do not specify `DeliverLogsPermissionArn` or `LogGroupName` .\n\nDefault: `cloud-watch-logs`", "LogFormat": "The fields to include in the flow log record, in the order in which they should appear. For a list of available fields, see [Flow Log Records](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html#flow-log-records) . If you omit this parameter, the flow log is created using the default format. If you specify this parameter, you must specify at least one field.\n\nSpecify the fields using the `${field-id}` format, separated by spaces.", @@ -11392,6 +11446,7 @@ "AWS::EC2::Instance": { "attributes": { "AvailabilityZone": "The Availability Zone where the specified instance is launched. For example: `us-east-1b` .\n\nYou can retrieve a list of all Availability Zones for a Region by using the [Fn::GetAZs](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getavailabilityzones.html) intrinsic function.", + "PrivateDnsName": "The private DNS name of the specified instance. For example: `ip-10-24-34-0.ec2.internal` .", "PrivateIp": "The private IP address of the specified instance. For example: `10.24.34.0` .", "PublicDnsName": "The public DNS name of the specified instance. For example: `ec2-107-20-50-45.compute-1.amazonaws.com` .", "PublicIp": "The public IP address of the specified instance. For example: `192.0.2.0` .", @@ -11725,6 +11780,33 @@ "SpotOptions": "The options for Spot Instances." } }, + "AWS::EC2::LaunchTemplate.InstanceRequirements": { + "attributes": {}, + "description": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nWhen you specify multiple parameters, you get instance types that satisfy all of the specified parameters. If you specify multiple values for a parameter, you get instance types that satisfy any of the specified values.\n\n> You must specify `VCpuCount` and `MemoryMiB` . All other parameters are optional. Any unspecified optional parameter is set to its default. 
\n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", + "properties": { + "AcceleratorCount": "The minimum and maximum number of accelerators (GPUs, FPGAs, or AWS Inferentia chips) on an instance.\n\nTo exclude accelerator-enabled instance types, set `Max` to `0` .\n\nDefault: No minimum or maximum limits", + "AcceleratorManufacturers": "Indicates whether instance types must have accelerators by specific manufacturers.\n\n- For instance types with NVIDIA devices, specify `nvidia` .\n- For instance types with AMD devices, specify `amd` .\n- For instance types with AWS devices, specify `amazon-web-services` .\n- For instance types with Xilinx devices, specify `xilinx` .\n\nDefault: Any manufacturer", + "AcceleratorNames": "The accelerators that must be on the instance type.\n\n- For instance types with NVIDIA A100 GPUs, specify `a100` .\n- For instance types with NVIDIA V100 GPUs, specify `v100` .\n- For instance types with NVIDIA K80 GPUs, specify `k80` .\n- For instance types with NVIDIA T4 GPUs, specify `t4` .\n- For instance types with NVIDIA M60 GPUs, specify `m60` .\n- For instance types with AMD Radeon Pro V520 GPUs, specify `radeon-pro-v520` .\n- For instance types with Xilinx VU9P FPGAs, specify `vu9p` .\n\nDefault: Any accelerator", + "AcceleratorTotalMemoryMiB": "The minimum and maximum amount of total accelerator memory, in MiB.\n\nDefault: No minimum or maximum limits", + "AcceleratorTypes": "The accelerator types that must be on the instance type.\n\n- For instance types with GPU accelerators, specify `gpu` .\n- For instance types with FPGA accelerators, specify `fpga` .\n- For instance types with inference accelerators, specify `inference` .\n\nDefault: Any accelerator type", + "BareMetal": "Indicates whether bare metal instance types must be included, excluded, or required.\n\n- To include bare metal instance types, specify `included` .\n- To require only bare metal instance types, specify `required` .\n- To exclude bare metal instance types, specify `excluded` .\n\nDefault: `excluded`", + "BaselineEbsBandwidthMbps": "The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see [Amazon EBS\u2013optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", + "BurstablePerformance": "Indicates whether burstable performance T instance types are included, excluded, or required. 
For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) .\n\n- To include burstable performance instance types, specify `included` .\n- To require only burstable performance instance types, specify `required` .\n- To exclude burstable performance instance types, specify `excluded` .\n\nDefault: `excluded`", + "CpuManufacturers": "The CPU manufacturers to include.\n\n- For instance types with Intel CPUs, specify `intel` .\n- For instance types with AMD CPUs, specify `amd` .\n- For instance types with AWS CPUs, specify `amazon-web-services` .\n\n> Don't confuse the CPU manufacturer with the CPU architecture. Instances will be launched with a compatible CPU architecture based on the Amazon Machine Image (AMI) that you specify in your launch template. \n\nDefault: Any manufacturer", + "ExcludedInstanceTypes": "The instance types to exclude. You can use strings with one or more wild cards, represented by an asterisk ( `*` ), to exclude an instance type, size, or generation. The following are examples: `m5.8xlarge` , `c5*.*` , `m5a.*` , `r*` , `*3*` .\n\nFor example, if you specify `c5*` , Amazon EC2 will exclude the entire C5 instance family, which includes all C5a and C5n instance types. If you specify `m5a.*` , Amazon EC2 will exclude all the M5a instance types, but not the M5n instance types.\n\nDefault: No excluded instance types", + "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", + "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", + "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: `hdd` and `ssd`", + "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", + "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", + "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", + "OnDemandMaxPricePercentageOverLowestPrice": "The price protection threshold for On-Demand Instances. This is the maximum you\u2019ll pay for an On-Demand Instance, expressed as a percentage above the cheapest M, C, or R instance type with your specified attributes.
When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\nDefault: `20`", + "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", + "SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instances. This is the maximum you\u2019ll pay for a Spot Instance, expressed as a percentage above the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\nDefault: `100`", + "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", + "VCpuCount": "The minimum and maximum number of vCPUs." + } + }, "AWS::EC2::LaunchTemplate.Ipv6Add": { "attributes": {}, "description": "Specifies an IPv6 address in an Amazon EC2 launch template.\n\n`Ipv6Add` is a property of [AWS::EC2::LaunchTemplate NetworkInterface](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-networkinterface.html) .", @@ -11750,6 +11832,7 @@ "ImageId": "The ID of the AMI.", "InstanceInitiatedShutdownBehavior": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n\nDefault: `stop`", "InstanceMarketOptions": "The market (purchasing) option for the instances.", + "InstanceRequirements": "", "InstanceType": "The instance type. For more information, see [Instance Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nIf you specify `InstanceTypes` , you can't specify `InstanceRequirements` .", "KernelId": "The ID of the kernel.\n\nWe recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User Provided Kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "KeyName": "The name of the key pair. 
You can create a key pair using [CreateKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or [ImportKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html) .\n\n> If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.", @@ -13175,7 +13258,6 @@ "attributes": { "CreationTimestamp": "The date and time the VPC endpoint was created. For example: `Fri Sep 28 23:34:36 UTC 2018.`", "DnsEntries": "(Interface endpoint) The DNS entries for the endpoint. Each entry is a combination of the hosted zone ID and the DNS name. The entries are ordered as follows: regional public DNS, zonal public DNS, private DNS, and wildcard DNS. This order is not enforced for AWS Marketplace services.\n\nThe following is an example. In the first entry, the hosted zone ID is Z1HUB23UULQXV and the DNS name is vpce-01abc23456de78f9g-12abccd3.ec2.us-east-1.vpce.amazonaws.com.\n\n[\"Z1HUB23UULQXV:vpce-01abc23456de78f9g-12abccd3.ec2.us-east-1.vpce.amazonaws.com\", \"Z1HUB23UULQXV:vpce-01abc23456de78f9g-12abccd3-us-east-1a.ec2.us-east-1.vpce.amazonaws.com\", \"Z1C12344VYDITB0:ec2.us-east-1.amazonaws.com\"]\n\nIf you update the `PrivateDnsEnabled` or `SubnetIds` properties, the DNS entries in the list will change.", - "Id": "", "NetworkInterfaceIds": "(Interface endpoint) One or more network interface IDs. If you update the `PrivateDnsEnabled` or `SubnetIds` properties, the items in this list might change.", "Ref": "`Ref` returns the ID of the VPC endpoint." }, @@ -13211,7 +13293,8 @@ "properties": { "AcceptanceRequired": "Indicates whether requests from service consumers to create an endpoint to your service must be accepted.", "GatewayLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Gateway Load Balancers.", - "NetworkLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Network Load Balancers for your service." + "NetworkLoadBalancerArns": "The Amazon Resource Names (ARNs) of one or more Network Load Balancers for your service.", + "PayerResponsibility": "" } }, "AWS::EC2::VPCEndpointServicePermissions": { @@ -14185,7 +14268,7 @@ "ClusterSecurityGroupId": "The cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control plane to data plane communication.\n\nThis parameter is only returned by Amazon EKS clusters that support managed node groups. For more information, see [Managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) in the *Amazon EKS User Guide* .", "EncryptionConfigKeyArn": "Amazon Resource Name (ARN) or alias of the customer master key (CMK).", "Endpoint": "The endpoint for your Kubernetes API server, such as `https://5E1D0CEXAMPLEA591B746AFC5AB30262.yl4.us-west-2.eks.amazonaws.com` .", - "KubernetesNetworkConfig.ServiceIpv6Cidr": "", + "KubernetesNetworkConfig.ServiceIpv6Cidr": "The CIDR block that Kubernetes Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified `ipv6` for *ipFamily* when you created the cluster. 
Kubernetes assigns Service addresses from the unique local address range ( `fc00::/7` ) because you can't specify a custom IPv6 CIDR block when you create the cluster.", "OpenIdConnectIssuerUrl": "The issuer URL for the OIDC identity provider of the cluster, such as `https://oidc.eks.us-west-2.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E` . If you need to remove `https://` from this output value, you can include the following code in your template.\n\n`!Select [1, !Split [\"//\", !GetAtt EKSCluster.OpenIdConnectIssuerUrl]]`", "Ref": "`Ref` returns the resource name. For example:\n\n`{ \"Ref\": \"myCluster\" }`\n\nFor the Amazon EKS cluster `myCluster` , `Ref` returns the name of the cluster." }, @@ -14220,9 +14303,9 @@ "attributes": {}, "description": "The Kubernetes network configuration for the cluster.", "properties": { - "IpFamily": "", + "IpFamily": "Specify which IP family is used to assign Kubernetes Pod and Service IP addresses. If you don't specify a value, `ipv4` is used by default. You can only specify an IP family when you create a cluster and can't change this value once the cluster is created. If you specify `ipv6` , the VPC and subnets that you specify for cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them.\n\nYou can only specify `ipv6` for 1.21 and later clusters that use version 1.10.1 or later of the Amazon VPC CNI add-on. If you specify `ipv6` , then ensure that your VPC meets the requirements listed in [Assigning IPv6 addresses to Pods and Services](https://docs.aws.amazon.com/eks/latest/userguide/cni-ipv6.html) in the Amazon EKS User Guide. Kubernetes assigns Services IPv6 addresses from the unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR block. Pod addresses are assigned from the subnet's IPv6 CIDR.", "ServiceIpv4Cidr": "Don't specify a value if you select `ipv6` for *ipFamily* . The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not overlap with resources in other networks that are peered or connected to your VPC. The block must meet the following requirements:\n\n- Within one of the following private IP address blocks: 10.0.0.0/8, 172.16.0.0/12, or 192.168.0.0/16.\n- Doesn't overlap with any CIDR block assigned to the VPC that you selected for VPC.\n- Between /24 and /12.\n\n> You can only specify a custom CIDR block when you create a cluster and can't change this value once the cluster is created.", - "ServiceIpv6Cidr": "" + "ServiceIpv6Cidr": "The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you created a 1.21 or later cluster with version 1.10.1 or later of the Amazon VPC CNI add-on and specified `ipv6` for *ipFamily* when you created the cluster. Kubernetes assigns Service addresses from the unique local address range ( `fc00::/7` ) because you can't specify a custom IPv6 CIDR block when you create the cluster." } }, "AWS::EKS::Cluster.Logging": { @@ -15082,7 +15165,7 @@ "properties": { "ReplicationGroupId": "The replication group id of the Global datastore member.", "ReplicationGroupRegion": "The Amazon region of the Global datastore member.", - "Role": "Indicates the role of the replication group, primary or secondary." + "Role": "Indicates the role of the replication group, `PRIMARY` or `SECONDARY` ."
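To make the `AWS::EC2::LaunchTemplate.InstanceRequirements` documentation above concrete, here is a hedged sketch of a minimal template fragment, written as a plain object (the values are illustrative; only `VCpuCount` and `MemoryMiB` are required, per the text above):

```ts
// Illustrative CloudFormation fragment for attribute-based instance type selection.
// Only VCpuCount and MemoryMiB are required; all other keys shown are optional.
const launchTemplateData = {
  InstanceRequirements: {
    VCpuCount: { Min: 2, Max: 8 },      // required: minimum and maximum number of vCPUs
    MemoryMiB: { Min: 4096 },           // required: minimum amount of memory, in MiB
    InstanceGenerations: ['current'],   // current-generation types only
    BareMetal: 'excluded',              // the documented default, stated explicitly
    ExcludedInstanceTypes: ['m5a.*'],   // wildcard exclusion, as documented above
  },
};

console.log(JSON.stringify({ LaunchTemplateData: launchTemplateData }, null, 2));
```

Per the documentation above, multiple parameters are ANDed together while multiple values for one parameter are ORed, so this fragment matches any current-generation, non-bare-metal type with 2-8 vCPUs and at least 4 GiB of memory, excluding the M5a family.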
} }, "AWS::ElastiCache::GlobalReplicationGroup.RegionalConfiguration": { @@ -16185,7 +16268,16 @@ "description": "Creates a new event bus within your account. This can be a custom event bus which you can use to receive events from your custom applications and services, or it can be a partner event bus which can be matched to a partner event source.", "properties": { "EventSourceName": "If you are creating a partner event bus, this specifies the partner event source that the new event bus will be matched with.", - "Name": "The name of the new event bus.\n\nEvent bus names cannot contain the / character. You can't use the name `default` for a custom event bus, as this name is already used for your account's default event bus.\n\nIf this is a partner event bus, the name must exactly match the name of the partner event source that this event bus is matched to." + "Name": "The name of the new event bus.\n\nEvent bus names cannot contain the / character. You can't use the name `default` for a custom event bus, as this name is already used for your account's default event bus.\n\nIf this is a partner event bus, the name must exactly match the name of the partner event source that this event bus is matched to.", + "Tags": "" + } + }, + "AWS::Events::EventBus.TagEntry": { + "attributes": {}, + "description": "", + "properties": { + "Key": "", + "Value": "" } }, "AWS::Events::EventBusPolicy": { @@ -17470,6 +17562,7 @@ "NotificationTarget": "An SNS topic ARN that is set up to receive game session placement notifications. See [Setting up notifications for game session placement](https://docs.aws.amazon.com/gamelift/latest/developerguide/queue-notification.html) .", "PlayerLatencyPolicies": "A set of policies that act as a sliding cap on player latency. FleetIQ works to deliver low latency for most players in a game session. These policies ensure that no individual player can be placed into a game with unreasonably high latency. Use multiple policies to gradually relax latency requirements a step at a time. Multiple policies are applied based on their maximum allowed latency, starting with the lowest value.", "PriorityConfiguration": "Custom settings to use when prioritizing destinations and locations for game session placements. This configuration replaces the FleetIQ default prioritization process. Priority types that are not explicitly named will be automatically applied at the end of the prioritization process.", + "Tags": "A list of labels to assign to the new game session queue resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits.", "TimeoutInSeconds": "The maximum time, in seconds, that a new game session placement request remains in the queue. When a request exceeds this time, the game session placement changes to a `TIMED_OUT` status." } }, @@ -17524,7 +17617,8 @@ "Name": "A unique identifier for a matchmaking configuration. Matchmaking requests use this name to identify which matchmaking configuration to use.", "NotificationTarget": "An SNS topic ARN that is set up to receive matchmaking notifications.
See [Setting up notifications for matchmaking](https://docs.aws.amazon.com/gamelift/latest/flexmatchguide/match-notification.html) for more information.", "RequestTimeoutSeconds": "The maximum duration, in seconds, that a matchmaking ticket can remain in process before timing out. Requests that fail due to timing out can be resubmitted as needed.", - "RuleSetName": "A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region." + "RuleSetName": "A unique identifier for the matchmaking rule set to use with this configuration. You can use either the rule set name or ARN value. A matchmaking configuration can only use rule sets that are defined in the same Region.", + "Tags": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits." } }, "AWS::GameLift::MatchmakingConfiguration.GameProperty": { @@ -17544,7 +17638,8 @@ "description": "The `AWS::GameLift::MatchmakingRuleSet` resource creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams. It also sets the parameters for acceptable player matches, such as minimum skill level or character type. A rule set is used by a matchmaking configuration.", "properties": { "Name": "A unique identifier for the matchmaking rule set. A matchmaking configuration identifies the rule set it uses by this name value. Note that the rule set name is different from the optional `name` field in the rule set body.", - "RuleSetBody": "A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field." + "RuleSetBody": "A collection of matchmaking rules, formatted as a JSON string. Comments are not allowed in JSON, but most elements support a description field.", + "Tags": "A list of labels to assign to the new matchmaking rule set resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* . Once the resource is created, you can use `TagResource` , `UntagResource` , and `ListTagsForResource` to add, remove, and view tags. The maximum tag limit may be lower than stated. See the AWS General Reference for actual tagging limits." } }, "AWS::GameLift::Script": { @@ -17557,6 +17652,7 @@ "properties": { "Name": "A descriptive label that is associated with a script. Script names do not need to be unique.", "StorageLocation": "The location in Amazon S3 where build or script files are stored for access by Amazon GameLift.", + "Tags": "", "Version": "The version that is associated with a build or script. Version strings do not need to be unique."
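The `Tags` properties added to the GameLift resources above take the standard CloudFormation key-value list. A small illustrative fragment (the names and the rule set body are placeholders):

```ts
// Illustrative fragment: tagging a matchmaking rule set, per the Tags docs above.
const matchmakingRuleSet = {
  Type: 'AWS::GameLift::MatchmakingRuleSet',
  Properties: {
    Name: 'my-rule-set',                                              // placeholder
    RuleSetBody: JSON.stringify({ name: 'my-rule-set', teams: [] }),  // placeholder body
    Tags: [{ Key: 'team', Value: 'game-services' }],                  // developer-defined pairs
  },
};
console.log(JSON.stringify(matchmakingRuleSet, null, 2));
```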
} }, @@ -20326,7 +20422,7 @@ "properties": { "DisplayName": "Field that represents a friendly name in the console for the custom metric; it doesn't have to be unique. Don't use this name as the metric identifier in the device metric report. Can be updated.", "MetricName": "The name of the custom metric. This will be used in the metric report submitted from the device/thing. It shouldn't begin with `aws:` . Cannot be updated once it's defined.", - "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , and `number-list` .", + "MetricType": "The type of the custom metric. Types include `string-list` , `ip-address-list` , `number-list` , and `number` .", "Tags": "Metadata that can be used to manage the custom metric." } }, @@ -20421,6 +20517,7 @@ "Document": "The job document.\n\nRequired if you don't specify a value for `documentSource` .", "DocumentSource": "An S3 link to the job document to use in the template. Required if you don't specify a value for `document` .\n\n> If the job document resides in an S3 bucket, you must use a placeholder link when specifying the document.\n> \n> The placeholder link is of the following form:\n> \n> `${aws:iot:s3-presigned-url:https://s3.amazonaws.com/ *bucket* / *key* }`\n> \n> where *bucket* is your bucket name and *key* is the object in the bucket to which you are linking.", "JobArn": "The ARN of the job to use as the basis for the job template.", + "JobExecutionsRetryConfig": "Allows you to create the criteria to retry a job.", "JobExecutionsRolloutConfig": "Allows you to create a staged rollout of a job.", "JobTemplateId": "A unique identifier for the job template. We recommend using a UUID. Alpha-numeric characters, \"-\", and \"_\" are valid for use here.", "PresignedUrlConfig": "Configuration for pre-signed S3 URLs.", @@ -23490,11 +23587,6 @@ "S3ContentLocation": "The location of the custom artifacts." } }, - "AWS::KinesisAnalyticsV2::Application.CustomArtifactsConfiguration": { - "attributes": {}, - "description": "A list of `CustomArtifactConfiguration` objects.", - "properties": {} - }, "AWS::KinesisAnalyticsV2::Application.DeployAsApplicationConfiguration": { "attributes": {}, "description": "The information required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state.", @@ -24709,6 +24801,13 @@ "Enabled": "Indicates whether an intent uses the dialog code hook during a conversation with a user." } }, + "AWS::Lex::Bot.ExternalSourceSetting": { + "attributes": {}, + "description": "Provides information about the external source of the slot type's definition.", + "properties": { + "GrammarSlotTypeSetting": "Settings required for a slot type based on a grammar that you provide." + } + }, "AWS::Lex::Bot.FulfillmentCodeHookSetting": { "attributes": {}, "description": "Determines if a Lambda function should be invoked for a specific intent.", @@ -24746,6 +24845,22 @@ "UpdateResponse": "Provides configuration information for messages sent periodically to the user while the fulfillment Lambda function is running." } }, + "AWS::Lex::Bot.GrammarSlotTypeSetting": { + "attributes": {}, + "description": "Settings required for a slot type based on a grammar that you provide.", + "properties": { + "Source": "The source of the grammar used to create the slot type." 
+ } + }, + "AWS::Lex::Bot.GrammarSlotTypeSource": { + "attributes": {}, + "description": "Describes the Amazon S3 bucket name and location for the grammar that is the source of the slot type.", + "properties": { + "KmsKeyArn": "The AWS Key Management Service key required to decrypt the contents of the grammar, if any.", + "S3BucketName": "The name of the S3 bucket that contains the grammar source.", + "S3ObjectKey": "The path to the grammar in the S3 bucket." + } + }, "AWS::Lex::Bot.ImageResponseCard": { "attributes": {}, "description": "A card that is shown to the user by a messaging platform. You define the contents of the card, the card is displayed by the platform.\n\nWhen you use a response card, the response from the user is constrained to the text associated with a button on the card.", @@ -24951,6 +25066,7 @@ "description": "Describes a slot type.", "properties": { "Description": "A description of the slot type. Use the description to help identify the slot type in lists.", + "ExternalSourceSetting": "Sets the type of external information used to create the slot type.", "Name": "The name of the slot type. A slot type name must be unique withing the account.", "ParentSlotTypeSignature": "The built-in slot type used as a parent of this slot type. When you define a parent slot type, the new slot type has the configuration of the parent lot type.\n\nOnly AMAZON.AlphaNumeric is supported.", "SlotTypeValues": "A list of SlotTypeValue objects that defines the values that the slot type can take. Each value can have a list of synonyms, additional values that help train the machine learning model about the values that it resolves for the slot.", @@ -25140,6 +25256,14 @@ "SourceBotVersion": "The version of a bot used for a bot locale." } }, + "AWS::Lex::BotVersion.BotVersionLocaleSpecification": { + "attributes": {}, + "description": "Specifies the locale that Amazon Lex adds to this version. You can choose the Draft version or any other previously published version for each locale. When you specify a source version, the locale data is copied from the source version to the new version.", + "properties": { + "BotVersionLocaleDetails": "The version of a bot used for a bot locale.", + "LocaleId": "The identifier of the locale to add to the version." + } + }, "AWS::Lex::BotVersion.BotVersionLocaleSpecificationItem": { "attributes": {}, "description": "Specifies the details of a locale in a bot version.", @@ -25158,6 +25282,11 @@ "ResourceArn": "The Amazon Resource Name (ARN) of the bot or bot alias that the resource policy is attached to." } }, + "AWS::Lex::ResourcePolicy.Policy": { + "attributes": {}, + "description": "The resource policy to add to the resource. The policy is a JSON structure that contains one or more statements that define the policy. The policy must follow the IAM policy syntax. For more information about the contents of a JSON policy document, see the [IAM JSON policy reference](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) .", + "properties": {} + }, "AWS::LicenseManager::Grant": { "attributes": { "GrantArn": "The Amazon Resource Name (ARN) of the grant.", @@ -25255,6 +25384,53 @@ "End": "End of the time range." 
} }, + "AWS::Lightsail::Alarm": { + "attributes": { + "AlarmArn": "The Amazon Resource Name (ARN) of the alarm.", + "Ref": "", + "State": "The current state of the alarm.\n\nAn alarm has the following possible states:\n\n- `ALARM` - The metric is outside of the defined threshold.\n- `INSUFFICIENT_DATA` - The alarm has recently started, the metric is not available, or not enough data is available for the metric to determine the alarm state.\n- `OK` - The metric is within the defined threshold." + }, + "description": "The `AWS::Lightsail::Alarm` resource specifies an alarm that can be used to monitor a single metric for one of your Lightsail resources.", + "properties": { + "AlarmName": "The name of the alarm.", + "ComparisonOperator": "The arithmetic operation to use when comparing the specified statistic and threshold.", + "ContactProtocols": "The contact protocols for the alarm, such as `Email` , `SMS` (text messaging), or both.\n\n*Allowed Values* : `Email` | `SMS`", + "DatapointsToAlarm": "The number of data points within the evaluation periods that must be breaching to cause the alarm to go to the `ALARM` state.", + "EvaluationPeriods": "The number of periods over which data is compared to the specified threshold.", + "MetricName": "The name of the metric associated with the alarm.", + "MonitoredResourceName": "The name of the Lightsail resource that the alarm monitors.", + "NotificationEnabled": "A Boolean value indicating whether the alarm is enabled.", + "NotificationTriggers": "The alarm states that trigger a notification.\n\n> To specify the `OK` and `INSUFFICIENT_DATA` values, you must also specify `ContactProtocols` values. Otherwise, the `OK` and `INSUFFICIENT_DATA` values will not take effect and the stack will drift. \n\n*Allowed Values* : `OK` | `ALARM` | `INSUFFICIENT_DATA`", + "Threshold": "The value against which the specified statistic is compared.", + "TreatMissingData": "Specifies how the alarm handles missing data points.\n\nAn alarm can treat missing data in the following ways:\n\n- `breaching` - Assumes the missing data is not within the threshold. Missing data counts towards the number of times that the metric is not within the threshold.\n- `notBreaching` - Assumes the missing data is within the threshold. Missing data does not count towards the number of times that the metric is not within the threshold.\n- `ignore` - Ignores the missing data. Maintains the current alarm state.\n- `missing` - Missing data is treated as missing." + } + }, + "AWS::Lightsail::Bucket": { + "attributes": { + "AbleToUpdateBundle": "A Boolean value indicating whether the bundle that is currently applied to your distribution can be changed to another bundle.", + "BucketArn": "The Amazon Resource Name (ARN) of the bucket.", + "Ref": "", + "Url": "The URL of the bucket." + }, + "description": "The `AWS::Lightsail::Bucket` resource specifies a bucket.", + "properties": { + "AccessRules": "An object that describes the access rules for the bucket.", + "BucketName": "The name of the bucket.", + "BundleId": "The bundle ID for the bucket (for example, `small_1_0` ).\n\nA bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.", + "ObjectVersioning": "Indicates whether object versioning is enabled for the bucket.\n\nThe following options can be configured:\n\n- `Enabled` - Object versioning is enabled.\n- `Suspended` - Object versioning was previously enabled but is currently suspended. 
Existing object versions are retained.\n- `NeverEnabled` - Object versioning has never been enabled.", + "ReadOnlyAccessAccounts": "An array of AWS account IDs that have read-only access to the bucket.", + "ResourcesReceivingAccess": "An array of Lightsail instances that have access to the bucket.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." + } + }, + "AWS::Lightsail::Bucket.AccessRules": { + "attributes": {}, + "description": "`AccessRules` is a property of the [AWS::Lightsail::Bucket](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lightsail-bucket.html) resource. It describes access rules for a bucket.", + "properties": { + "AllowPublicOverrides": "A Boolean value indicating whether the access control list (ACL) permissions that are applied to individual objects override the `GetObject` option that is currently specified.\n\nWhen this is true, you can use the [PutObjectAcl](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectAcl.html) Amazon S3 API operation to set individual objects to public (read-only) or private, using either the `public-read` ACL or the `private` ACL.", + "GetObject": "Specifies the anonymous access to all objects in a bucket.\n\nThe following options can be specified:\n\n- `public` - Sets all objects in the bucket to public (read-only), making them readable by everyone on the internet.\n\nIf the `GetObject` value is set to `public` , then all objects in the bucket default to public regardless of the `allowPublicOverrides` value.\n- `private` - Sets all objects in the bucket to private, making them readable only by you and anyone that you grant access to.\n\nIf the `GetObject` value is set to `private` , and the `allowPublicOverrides` value is set to `true` , then all objects in the bucket default to private unless they are configured with a `public-read` ACL. Individual objects with a `public-read` ACL are readable by everyone on the internet." + } + }, "AWS::Lightsail::Database": { "attributes": { "DatabaseArn": "The Amazon Resource Name (ARN) of the database (for example, `arn:aws:lightsail:us-east-2:123456789101:RelationalDatabase/244ad76f-8aad-4741-809f-12345EXAMPLE` ).", @@ -25275,7 +25451,7 @@ "RelationalDatabaseBundleId": "The bundle ID for the database (for example, `medium_1_0` ).", "RelationalDatabaseName": "The name of the instance.", "RelationalDatabaseParameters": "An array of parameters for the database.", - "RotateMasterUserPassword": "A boolean value indicating whether to change the primary user password to a new, strong password generated by Lightsail .\n\n> The `RotateMasterUserPassword` and `MasterUserPassword` parameters cannot be used together in the same template.", + "RotateMasterUserPassword": "A Boolean value indicating whether to change the primary user password to a new, strong password generated by Lightsail .\n\n> The `RotateMasterUserPassword` and `MasterUserPassword` parameters cannot be used together in the same template.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." 
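As a concrete reading of the `AccessRules` documentation above: the combination below keeps all objects in the bucket private by default while still allowing individual objects to be made public with a `public-read` ACL. All names are placeholders:

```ts
// Illustrative fragment for the AccessRules behavior documented above.
const bucket = {
  Type: 'AWS::Lightsail::Bucket',
  Properties: {
    BucketName: 'my-lightsail-bucket',  // placeholder
    BundleId: 'small_1_0',              // example bundle ID from the docs above
    AccessRules: {
      GetObject: 'private',             // all objects private by default
      AllowPublicOverrides: true,       // permit per-object public-read ACLs
    },
  },
};
console.log(JSON.stringify(bucket, null, 2));
```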
} }, @@ -25449,6 +25625,38 @@ "Name": "The state of the instance (for example, `running` or `pending` )." } }, + "AWS::Lightsail::LoadBalancer": { + "attributes": { + "LoadBalancerArn": "The Amazon Resource Name (ARN) of the load balancer.", + "Ref": "" + }, + "description": "The `AWS::Lightsail::LoadBalancer` resource specifies a load balancer that can be used with Lightsail instances.\n\n> You cannot attach TLS certificates to a load balancer using the `AWS::Lightsail::LoadBalancer` resource type. Instead, use the `LoadBalancerTlsCertificate` resource type to create a certificate and attach it to a load balancer.", + "properties": { + "AttachedInstances": "The Lightsail instances to attach to the load balancer.", + "HealthCheckPath": "The path on the attached instance where the health check will be performed. If no path is specified, the load balancer tries to make a request to the default (root) page ( `/index.html` ).", + "InstancePort": "The port that the load balancer uses to direct traffic to your Lightsail instances. For HTTP traffic, specify port `80` . For HTTPS traffic, specify port `443` .", + "IpAddressType": "The IP address type of the load balancer.\n\nThe possible values are `ipv4` for IPv4 only, and `dualstack` for both IPv4 and IPv6.", + "LoadBalancerName": "The name of the load balancer.", + "SessionStickinessEnabled": "A Boolean value indicating whether session stickiness is enabled.\n\nEnable session stickiness (also known as *session affinity* ) to bind a user's session to a specific instance. This ensures that all requests from the user during the session are sent to the same instance.", + "SessionStickinessLBCookieDurationSeconds": "The time period, in seconds, after which the load balancer session stickiness cookie should be considered stale. If you do not specify this parameter, the default value is 0, which indicates that the sticky session should last for the duration of the browser session.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) in the *AWS CloudFormation User Guide* .\n\n> The `Value` of `Tags` is optional for Lightsail resources." + } + }, + "AWS::Lightsail::LoadBalancerTlsCertificate": { + "attributes": { + "LoadBalancerTlsCertificateArn": "The Amazon Resource Name (ARN) of the SSL/TLS certificate.", + "Ref": "", + "Status": "The validation status of the SSL/TLS certificate.\n\nValid Values: `PENDING_VALIDATION` | `ISSUED` | `INACTIVE` | `EXPIRED` | `VALIDATION_TIMED_OUT` | `REVOKED` | `FAILED` | `UNKNOWN`" + }, + "description": "The `AWS::Lightsail::LoadBalancerTlsCertificate` resource specifies a TLS certificate that can be used with a Lightsail load balancer.", + "properties": { + "CertificateAlternativeNames": "An array of alternative domain names and subdomain names for your SSL/TLS certificate.\n\nIn addition to the primary domain name, you can have up to nine alternative domain names. Wildcards (such as `*.example.com` ) are not supported.", + "CertificateDomainName": "The domain name for the SSL/TLS certificate. For example, `example.com` or `www.example.com` .", + "CertificateName": "The name of the SSL/TLS certificate.", + "IsAttached": "A Boolean value indicating whether the SSL/TLS certificate is attached to a Lightsail load balancer.", + "LoadBalancerName": "The name of the load balancer that the SSL/TLS certificate is attached to."
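Per the note above that certificates cannot be attached through `AWS::Lightsail::LoadBalancer` itself, a hedged sketch of the two-resource pairing (all names and domains are placeholders):

```ts
// Illustrative pairing: the certificate is a separate resource that
// references the load balancer by name, per the note above.
const template = {
  Resources: {
    MyLoadBalancer: {
      Type: 'AWS::Lightsail::LoadBalancer',
      Properties: {
        LoadBalancerName: 'my-lb',  // placeholder
        InstancePort: 443,          // direct HTTPS traffic to the instances
      },
    },
    MyCertificate: {
      Type: 'AWS::Lightsail::LoadBalancerTlsCertificate',
      Properties: {
        LoadBalancerName: 'my-lb',             // must match the load balancer above
        CertificateName: 'my-cert',            // placeholder
        CertificateDomainName: 'example.com',  // primary domain
      },
    },
  },
};
console.log(JSON.stringify(template, null, 2));
```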
+ } + }, "AWS::Lightsail::StaticIp": { "attributes": { "IpAddress": "The IP address of the static IP.", @@ -27331,6 +27539,7 @@ "Mode": "If \"vod,\" all segments are indexed and kept permanently in the destination and manifest. If \"live,\" only the number segments specified in keepSegments and indexNSegments are kept. Newer segments replace older segments, which might prevent players from rewinding all the way to the beginning of the channel. VOD mode uses HLS EXT-X-PLAYLIST-TYPE of EVENT while the channel is running, converting it to a \"VOD\" type manifest on completion of the stream.", "OutputSelection": "MANIFESTSANDSEGMENTS: Generates manifests (the master manifest, if applicable, and media manifests) for this output group. SEGMENTSONLY: Doesn't generate any manifests for this output group.", "ProgramDateTime": "Includes or excludes the EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated as follows: Either the program date and time are initialized using the input timecode source, or the time is initialized using the input timecode source and the date is initialized using the timestampOffset.", + "ProgramDateTimeClock": "", "ProgramDateTimePeriod": "The period of insertion of the EXT-X-PROGRAM-DATE-TIME entry, in seconds.", "RedundantManifest": "ENABLED: The master manifest (.m3u8 file) for each pipeline includes information about both pipelines: first its own media files, then the media files of the other pipeline. This feature allows a playout device that supports stale manifest detection to switch from one manifest to the other, when the current manifest seems to be stale. There are still two destinations and two master manifests, but both master manifests reference the media files from both pipelines. DISABLED: The master manifest (.m3u8 file) for each pipeline includes information about its own pipeline only. For an HLS output group with MediaPackage as the destination, the DISABLED behavior is always followed. MediaPackage regenerates the manifests it serves to players, so a redundant manifest from MediaLive is irrelevant.", "SegmentLength": "The length of the MPEG-2 Transport Stream segments to create, in seconds. Note that segments will end on the next keyframe after this number of seconds, so the actual segment length might be longer.", @@ -27464,6 +27673,7 @@ "FilterStrength": "Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest).", "InputFilter": "Turns on the filter for this input. MPEG-2 inputs have the deblocking filter enabled by default. 1) auto - filtering is applied depending on input type/quality 2) disabled - no filtering is applied to the input 3) forced - filtering is applied regardless of the input type.", "NetworkInputSettings": "Information about how to connect to the upstream system.", + "Scte35Pid": "", "Smpte2038DataPreference": "Specifies whether to extract applicable ancillary data from a SMPTE-2038 source in this input. Applicable data types are captions, timecode, AFD, and SCTE-104 messages.\n- PREFER: Extract from SMPTE-2038 if present in this input, otherwise extract from another source (if any).\n- IGNORE: Never extract any ancillary data from SMPTE-2038.", "SourceEndBehavior": "The loop input if it is a file.", "VideoSelector": "Information about one video to extract from the input." 
@@ -29243,9 +29453,27 @@ "ClipboardMode": "Enable or disable the use of the system clipboard to copy and paste between the streaming session and streaming client.", "Ec2InstanceTypes": "The EC2 instance types that users can select from when launching a streaming session with this launch profile.", "MaxSessionLengthInMinutes": "The length of time, in minutes, that a streaming session can be active before it is stopped or terminated. After this point, Nimble Studio automatically terminates or stops the session. The default length of time is 690 minutes, and the maximum length of time is 30 days.", + "MaxStoppedSessionLengthInMinutes": "Integer that determines if you can start and stop your sessions and how long a session can stay in the STOPPED state. The default value is 0. The maximum value is 5760.\n\nIf the value is missing or set to 0, your sessions can\u2019t be stopped. If you then call `StopStreamingSession` , the session fails. If the time that a session stays in the READY state exceeds the `maxSessionLengthInMinutes` value, the session will automatically be terminated (instead of stopped).\n\nIf the value is set to a positive number, the session can be stopped. You can call `StopStreamingSession` to stop sessions in the READY state. If the time that a session stays in the READY state exceeds the `maxSessionLengthInMinutes` value, the session will automatically be stopped (instead of terminated).", + "SessionStorage": "(Optional) The upload storage for a streaming session.", "StreamingImageIds": "The streaming images that users can select from when launching a streaming session with this launch profile." } }, + "AWS::NimbleStudio::LaunchProfile.StreamConfigurationSessionStorage": { + "attributes": {}, + "description": "The configuration for a streaming session\u2019s upload storage.", + "properties": { + "Mode": "Allows artists to upload files to their workstations. The only valid option is `UPLOAD` .", + "Root": "The configuration for the upload storage root of the streaming session." + } + }, + "AWS::NimbleStudio::LaunchProfile.StreamingSessionStorageRoot": { + "attributes": {}, + "description": "The upload storage root location (folder) on streaming workstations where files are uploaded.", + "properties": { + "Linux": "The folder path in Linux workstations where files are uploaded.", + "Windows": "The folder path in Windows workstations where files are uploaded." + } + }, "AWS::NimbleStudio::StreamingImage": { "attributes": { "EulaIds": "The list of IDs of EULAs that must be accepted before a streaming session can be started using this streaming image.", @@ -31986,8 +32214,7 @@ "attributes": { "DBProxyArn": "The Amazon Resource Name (ARN) representing the target group.", "Endpoint": "The writer endpoint for the RDS DB instance or Aurora DB cluster.", - "Ref": "`Ref` returns the name of the DB proxy.", - "VpcId": "The VPC ID to associate with the DB proxy." + "Ref": "`Ref` returns the name of the DB proxy." 
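A hedged sketch of the new NimbleStudio launch profile stream settings documented above, as a plain fragment (folder paths are placeholders; the docs above state `UPLOAD` is the only valid mode):

```ts
// Illustrative fragment of the new session-storage settings documented above.
const streamConfiguration = {
  MaxStoppedSessionLengthInMinutes: 120,  // 0 would mean sessions can't be stopped
  SessionStorage: {
    Mode: ['UPLOAD'],                     // the only valid option, per the docs above
    Root: {
      Linux: '/home/user/uploads',        // placeholder folder paths
      Windows: 'C:\\Users\\user\\uploads',
    },
  },
};
console.log(JSON.stringify(streamConfiguration, null, 2));
```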
}, "description": "The `AWS::RDS::DBProxy` resource creates or updates a DB proxy.\n\nFor information about RDS Proxy for Amazon RDS, see [Managing Connections with Amazon RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-proxy.html) in the *Amazon RDS User Guide* .\n\nFor information about RDS Proxy for Amazon Aurora, see [Managing Connections with Amazon RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-proxy.html) in the *Amazon Aurora User Guide* .\n\n> Limitations apply to RDS Proxy, including DB engine version limitations and AWS Region limitations.\n> \n> For information about limitations that apply to RDS Proxy for Amazon RDS, see [Limitations for RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-proxy.html#rds-proxy.limitations) in the *Amazon RDS User Guide* .\n> \n> For information about limitations that apply to RDS Proxy for Amazon Aurora, see [Limitations for RDS Proxy](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-proxy.html#rds-proxy.limitations) in the *Amazon Aurora User Guide* .", "properties": { @@ -32853,7 +33080,7 @@ "HostedZoneConfig": "A complex type that contains an optional comment.\n\nIf you don't want to specify a comment, omit the `HostedZoneConfig` and `Comment` elements.", "HostedZoneTags": "Adds, edits, or deletes tags for a health check or a hosted zone.\n\nFor information about using tags for cost allocation, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the *AWS Billing and Cost Management User Guide* .", "Name": "The name of the domain. Specify a fully qualified domain name, for example, *www.example.com* . The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Route 53 treats *www.example.com* (without a trailing dot) and *www.example.com.* (with a trailing dot) as identical.\n\nIf you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name is registered with a registrar other than Route 53, change the name servers for your domain to the set of `NameServers` that are returned by the `Fn::GetAtt` intrinsic function.", - "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration.
Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. 
For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", + "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. 
For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", "VPCs": "*Private hosted zones:* A complex type that contains information about the VPCs that are associated with the specified hosted zone.\n\n> For public hosted zones, omit `VPCs` , `VPCId` , and `VPCRegion` ." } }, @@ -34783,7 +35010,7 @@ "AllowedPattern": "A regular expression used to validate the parameter value. 
For example, for String types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "DataType": "The data type of the parameter, such as `text` or `aws:ec2:image` . The default is `text` .", "Description": "Information about the parameter.", - "Name": "The name of the parameter.", + "Name": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", "Policies": "Information about the policies assigned to a parameter.\n\n[Assigning parameter policies](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) in the *AWS Systems Manager User Guide* .", "Tags": "Optional metadata that you assign to a resource in the form of an arbitrary set of tags (key-value pairs). Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter.", "Tier": "The parameter tier.", @@ -37491,7 +37718,8 @@ "attributes": {}, "description": "Protocol settings that are configured for your server.", "properties": { - "PassiveIp": "Indicates passive mode, for FTP and FTPS protocols. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer." + "PassiveIp": "Indicates passive mode, for FTP and FTPS protocols. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.", + "TlsSessionResumptionMode": "A property used with Transfer servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. `TlsSessionResumptionMode` determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during `CreateServer` and `UpdateServer` calls. If a `TlsSessionResumptionMode` value is not specified during CreateServer, it is set to `ENFORCED` by default.\n\n- `DISABLED` : the server does not process TLS session resumption client requests and creates a new TLS session for each request.\n- `ENABLED` : the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing.\n- `ENFORCED` : the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to `ENFORCED` , test your clients.\n\n> Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the `ENFORCED` value, you need to test your clients." 
} }, "AWS::Transfer::Server.WorkflowDetail": {

From 912aeda295820920ed880b9c85a98c56421647b8 Mon Sep 17 00:00:00 2001
From: Yerzhan Mazhkenov <20302932+yerzhan7@users.noreply.github.com>
Date: Mon, 10 Jan 2022 18:14:19 +0600
Subject: [PATCH 027/374] feat(s3): add EventBridge bucket notifications (#18150)

### **Description**

Adds EventBridge bucket notification configuration. See https://aws.amazon.com/blogs/aws/new-use-amazon-s3-event-notifications-with-amazon-eventbridge/

### **Implementation**

- Added a new `Bucket` property to enable this feature (`eventBridgeEnabled: true`)
- Added the EventBridge config to the `S3BucketNotifications` Custom Resource
- Added unit tests
- Added an integration test (currently fails, see below for more info)
- Fixed dependent integration tests

Closes #18076

### **FAQ**

1. **Why not simply expose the EventBridge Cfn property via S3 `BucketProps`?**

CDK currently manages `NotificationConfigurations` via a Custom Resource. If we were to expose the EventBridge setting that way, the Custom Resource would overwrite it; for example, an SNS configuration would override the EventBridge config.

2. **Why not create a new `IBucketNotificationDestination` class for EventBridge?**

We could, but there is no need. We usually create a subclass of `IBucketNotificationDestination` in order to adjust resource permissions; in this case there is nothing to adjust, because [by default EventBridge does not require any additional permissions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ev-permissions.html), unlike SQS/SNS/Lambda destinations.

Additionally, enabling this feature via a bucket prop is a much cleaner and simpler API than having customers create a dummy object of type `IBucketNotificationDestination`.

However, if you still think we need a new `IBucketNotificationDestination` subclass for EventBridge for consistency, let me know and I will refactor.
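For quick reference, here is a minimal sketch of how the new API is intended to be used, based on the README example added in this patch. The bucket portion mirrors the PR; the EventBridge rule that consumes the events is an assumed, illustrative consumer (the construct IDs and the rule itself are hypothetical and not part of this change):

```ts
import * as cdk from '@aws-cdk/core';
import * as events from '@aws-cdk/aws-events';
import * as s3 from '@aws-cdk/aws-s3';

const app = new cdk.App();
const stack = new cdk.Stack(app, 'EventBridgeNotificationsStack');

// One bucket prop enables delivery of S3 events to the default event bus;
// no IBucketNotificationDestination object is required.
const bucket = new s3.Bucket(stack, 'MyEventBridgeBucket', {
  eventBridgeEnabled: true,
});

// Illustrative consumer (not part of this PR): match "Object Created"
// events from this bucket; attach a target (e.g. a Lambda function) as needed.
new events.Rule(stack, 'ObjectCreatedRule', {
  eventPattern: {
    source: ['aws.s3'],
    detailType: ['Object Created'],
    detail: { bucket: { name: [bucket.bucketName] } },
  },
});
```

Because S3 publishes every event type to the default event bus, filtering happens on the rule side rather than on the bucket, which is why a single boolean prop is sufficient.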
---- **BLOCKED ON LAMBDA RUNTIME SDK UPDATE TO BOTOCORE >= v1.23.16 (Integration test currently fails as current version (v1.21.55) does not contain EventBridge configuration)** Check latest version here: https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../test/integ.s3.expected.json | 4 +- .../test/integ.notifications.expected.json | 2 +- .../integ.bucket-notifications.expected.json | 4 +- .../test/lambda/integ.bucket-notifications.ts | 2 +- ...teg.sns-bucket-notifications.expected.json | 2 +- .../integ.bucket-notifications.expected.json | 2 +- packages/@aws-cdk/aws-s3/README.md | 15 ++ packages/@aws-cdk/aws-s3/lib/bucket.ts | 17 ++ .../notifications-resource/lambda/index.py | 14 ++ .../notifications-resource.ts | 10 + .../integ.bucket.notifications.expected.json | 101 +++++++++ .../aws-s3/test/integ.bucket.notifications.ts | 14 ++ .../@aws-cdk/aws-s3/test/notification.test.ts | 18 ++ .../test_index.py | 203 ++++++++++++++++++ 14 files changed, 400 insertions(+), 8 deletions(-) create mode 100644 packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json create mode 100644 packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts diff --git a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json index ff71167d19f9e..bea3d9952fbc6 100644 --- a/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json +++ b/packages/@aws-cdk/aws-lambda-event-sources/test/integ.s3.expected.json @@ -177,7 +177,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { @@ -195,4 +195,4 @@ ] } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json index 9026931306ab4..a467d8e2d5a04 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/integ.notifications.expected.json @@ -247,7 +247,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json index 731effea95a53..71d8c5be9f84f 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.expected.json @@ -114,7 +114,7 @@ ] }, "Handler": "index.handler", - "Runtime": "nodejs10.x" + "Runtime": "nodejs14.x" }, "DependsOn": [ "MyFunctionServiceRole3C357FF2" @@ -237,7 +237,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts index c237b24e896e3..1493e29176362 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts +++ b/packages/@aws-cdk/aws-s3-notifications/test/lambda/integ.bucket-notifications.ts @@ -12,7 +12,7 @@ const bucketA = new s3.Bucket(stack, 'MyBucket', { }); const fn = new lambda.Function(stack, 'MyFunction', { - runtime: lambda.Runtime.NODEJS_10_X, + runtime: lambda.Runtime.NODEJS_14_X, handler: 'index.handler', code: lambda.Code.fromInline(`exports.handler = ${handler.toString()}`), }); diff --git a/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json index 47f2a8ea6e0ce..c55da2fa8dcb2 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/sns/integ.sns-bucket-notifications.expected.json @@ -195,7 +195,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json b/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json index 229b916beac4b..006856d6de207 100644 --- a/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json +++ b/packages/@aws-cdk/aws-s3-notifications/test/sqs/integ.bucket-notifications.expected.json @@ -184,7 +184,7 @@ "Properties": { "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", "Code": { - "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. 
\"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) 
failed executing request.urlopen(..): \" + str(e))\n" + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, 
NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" }, "Handler": "index.handler", "Role": { diff --git a/packages/@aws-cdk/aws-s3/README.md b/packages/@aws-cdk/aws-s3/README.md index 4f2592b9c633a..02c1b86be2855 100644 --- a/packages/@aws-cdk/aws-s3/README.md +++ b/packages/@aws-cdk/aws-s3/README.md @@ -252,6 +252,21 @@ bucket.addEventNotification(s3.EventType.OBJECT_CREATED, new s3n.SnsDestination( [S3 Bucket Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +### EventBridge notifications + +Amazon S3 can send events to Amazon EventBridge whenever certain events happen in your bucket. +Unlike other destinations, you don't need to select which event types you want to deliver. + +The following example will enable EventBridge notifications: + +```ts +const bucket = new s3.Bucket(this, 'MyEventBridgeBucket', { + eventBridgeEnabled: true, +}); +``` + +[S3 EventBridge notifications]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/EventBridge.html + ## Block Public Access Use `blockPublicAccess` to specify [block public access settings] on the bucket. diff --git a/packages/@aws-cdk/aws-s3/lib/bucket.ts b/packages/@aws-cdk/aws-s3/lib/bucket.ts index bb00aac5adde3..15e69d4610a70 100644 --- a/packages/@aws-cdk/aws-s3/lib/bucket.ts +++ b/packages/@aws-cdk/aws-s3/lib/bucket.ts @@ -865,6 +865,10 @@ export abstract class BucketBase extends Resource implements IBucket { return this.addEventNotification(EventType.OBJECT_REMOVED, dest, ...filters); } + protected enableEventBridgeNotification() { + this.notifications.enableEventBridgeNotification(); + } + private get writeActions(): string[] { return [ ...perms.BUCKET_DELETE_ACTIONS, @@ -1341,6 +1345,13 @@ export interface BucketProps { */ readonly versioned?: boolean; + /** + * Whether this bucket should send notifications to Amazon EventBridge or not. + * + * @default false + */ + readonly eventBridgeEnabled?: boolean; + /** * Rules that define how Amazon S3 manages objects during their lifetime. 
* @@ -1621,6 +1632,7 @@ export class Bucket extends BucketBase { private accessControl?: BucketAccessControl; private readonly lifecycleRules: LifecycleRule[] = []; private readonly versioned?: boolean; + private readonly eventBridgeEnabled?: boolean; private readonly metrics: BucketMetrics[] = []; private readonly cors: CorsRule[] = []; private readonly inventories: Inventory[] = []; @@ -1660,6 +1672,7 @@ export class Bucket extends BucketBase { this.versioned = props.versioned; this.encryptionKey = encryptionKey; + this.eventBridgeEnabled = props.eventBridgeEnabled; this.bucketName = this.getResourceNameAttribute(resource.ref); this.bucketArn = this.getResourceArnAttribute(resource.attrArn, { @@ -1710,6 +1723,10 @@ export class Bucket extends BucketBase { this.enableAutoDeleteObjects(); } + + if (this.eventBridgeEnabled) { + this.enableEventBridgeNotification(); + } } /** diff --git a/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py b/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py index 2551398d74958..d46989246e827 100644 --- a/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py +++ b/packages/@aws-cdk/aws-s3/lib/notifications-resource/lambda/index.py @@ -5,6 +5,8 @@ s3 = boto3.client("s3") +EVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration' + CONFIGURATION_TYPES = ["TopicConfigurations", "QueueConfigurations", "LambdaFunctionConfigurations"] def handler(event: dict, context): @@ -57,6 +59,13 @@ def with_id(notification): external = external_notifications.get(t, []) incoming = [with_id(n) for n in notification_configuration.get(t, [])] notifications[t] = external + incoming + + # EventBridge configuration is a special case because it's just an empty object if it exists + if EVENTBRIDGE_CONFIGURATION in notification_configuration: + notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION] + elif EVENTBRIDGE_CONFIGURATION in external_notifications: + notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION] + return notifications @@ -68,6 +77,11 @@ def find_external_notifications(bucket, stack_id): # so we can filter by it. 
external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f"{stack_id}-")] + # always treat EventBridge configuration as an external config if it already exists + # as there is no way to determine whether it's managed by us or not + if EVENTBRIDGE_CONFIGURATION in existing_notifications: + external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION] + return external_notifications diff --git a/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts b/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts index d5190f1a6a913..7ac760d006da9 100644 --- a/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts +++ b/packages/@aws-cdk/aws-s3/lib/notifications-resource/notifications-resource.ts @@ -31,6 +31,7 @@ interface NotificationsProps { * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-bucket-notificationconfig.html */ export class BucketNotifications extends Construct { + private eventBridgeEnabled = false; private readonly lambdaNotifications = new Array(); private readonly queueNotifications = new Array(); private readonly topicNotifications = new Array(); @@ -87,8 +88,14 @@ export class BucketNotifications extends Construct { } } + public enableEventBridgeNotification() { + this.createResourceOnce(); + this.eventBridgeEnabled = true; + } + private renderNotificationConfiguration(): NotificationConfiguration { return { + EventBridgeConfiguration: this.eventBridgeEnabled ? {} : undefined, LambdaFunctionConfigurations: this.lambdaNotifications.length > 0 ? this.lambdaNotifications : undefined, QueueConfigurations: this.queueNotifications.length > 0 ? this.queueNotifications : undefined, TopicConfigurations: this.topicNotifications.length > 0 ? 
this.topicNotifications : undefined, @@ -167,6 +174,7 @@ function renderFilters(filters?: NotificationKeyFilter[]): Filter | undefined { } interface NotificationConfiguration { + EventBridgeConfiguration?: EventBridgeConfiguration; LambdaFunctionConfigurations?: LambdaFunctionConfiguration[]; QueueConfigurations?: QueueConfiguration[]; TopicConfigurations?: TopicConfiguration[]; @@ -178,6 +186,8 @@ interface CommonConfiguration { Filter?: Filter } +interface EventBridgeConfiguration { } + interface LambdaFunctionConfiguration extends CommonConfiguration { LambdaFunctionArn: string; } diff --git a/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json new file mode 100644 index 0000000000000..4c3711e8018ef --- /dev/null +++ b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.expected.json @@ -0,0 +1,101 @@ +{ + "Resources": { + "MyEventBridgeBucketNotifications19C0453F": { + "Type": "Custom::S3BucketNotifications", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691", + "Arn" + ] + }, + "BucketName": { + "Ref": "MyEventBridgeBucket1ABD5C2A" + }, + "NotificationConfiguration": { + "EventBridgeConfiguration": {} + }, + "Managed": true + } + }, + "MyEventBridgeBucket1ABD5C2A": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + } + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "s3:PutBucketNotification", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36", + "Roles": [ + { + "Ref": "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC" + } + ] + } + }, + "BucketNotificationsHandler050a0587b7544547bf325f094a3db8347ECC3691": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Description": "AWS CloudFormation handler for \"Custom::S3BucketNotifications\" resources (@aws-cdk/aws-s3)", + "Code": { + "ZipFile": "import boto3 # type: ignore\nimport json\nimport logging\nimport urllib.request\n\ns3 = boto3.client(\"s3\")\n\nEVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration'\n\nCONFIGURATION_TYPES = [\"TopicConfigurations\", \"QueueConfigurations\", \"LambdaFunctionConfigurations\"]\n\ndef handler(event: dict, context):\n response_status = \"SUCCESS\"\n error_message = \"\"\n try:\n props = event[\"ResourceProperties\"]\n bucket = props[\"BucketName\"]\n notification_configuration = props[\"NotificationConfiguration\"]\n request_type = event[\"RequestType\"]\n managed = props.get('Managed', 'true').lower() == 'true'\n stack_id = event['StackId']\n\n if managed:\n config = handle_managed(request_type, notification_configuration)\n else:\n config = handle_unmanaged(bucket, stack_id, 
request_type, notification_configuration)\n\n put_bucket_notification_configuration(bucket, config)\n except Exception as e:\n logging.exception(\"Failed to put bucket notification configuration\")\n response_status = \"FAILED\"\n error_message = f\"Error: {str(e)}. \"\n finally:\n submit_response(event, context, response_status, error_message)\n\n\ndef handle_managed(request_type, notification_configuration):\n if request_type == 'Delete':\n return {}\n return notification_configuration\n\n\ndef handle_unmanaged(bucket, stack_id, request_type, notification_configuration):\n\n # find external notifications\n external_notifications = find_external_notifications(bucket, stack_id)\n\n # if delete, that's all we need\n if request_type == 'Delete':\n return external_notifications\n\n def with_id(notification):\n notification['Id'] = f\"{stack_id}-{hash(json.dumps(notification, sort_keys=True))}\"\n return notification\n\n # otherwise, merge external with incoming config and augment with id\n notifications = {}\n for t in CONFIGURATION_TYPES:\n external = external_notifications.get(t, [])\n incoming = [with_id(n) for n in notification_configuration.get(t, [])]\n notifications[t] = external + incoming\n\n # EventBridge configuration is a special case because it's just an empty object if it exists\n if EVENTBRIDGE_CONFIGURATION in notification_configuration:\n notifications[EVENTBRIDGE_CONFIGURATION] = notification_configuration[EVENTBRIDGE_CONFIGURATION]\n elif EVENTBRIDGE_CONFIGURATION in external_notifications:\n notifications[EVENTBRIDGE_CONFIGURATION] = external_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return notifications\n\n\ndef find_external_notifications(bucket, stack_id):\n existing_notifications = get_bucket_notification_configuration(bucket)\n external_notifications = {}\n for t in CONFIGURATION_TYPES:\n # if the notification was created by us, we know what id to expect\n # so we can filter by it.\n external_notifications[t] = [n for n in existing_notifications.get(t, []) if not n['Id'].startswith(f\"{stack_id}-\")]\n\n # always treat EventBridge configuration as an external config if it already exists\n # as there is no way to determine whether it's managed by us or not\n if EVENTBRIDGE_CONFIGURATION in existing_notifications:\n external_notifications[EVENTBRIDGE_CONFIGURATION] = existing_notifications[EVENTBRIDGE_CONFIGURATION]\n\n return external_notifications\n\n\ndef get_bucket_notification_configuration(bucket):\n return s3.get_bucket_notification_configuration(Bucket=bucket)\n\n\ndef put_bucket_notification_configuration(bucket, notification_configuration):\n s3.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=notification_configuration)\n\n\ndef submit_response(event: dict, context, response_status: str, error_message: str):\n response_body = json.dumps(\n {\n \"Status\": response_status,\n \"Reason\": f\"{error_message}See the details in CloudWatch Log Stream: {context.log_stream_name}\",\n \"PhysicalResourceId\": event.get(\"PhysicalResourceId\") or event[\"LogicalResourceId\"],\n \"StackId\": event[\"StackId\"],\n \"RequestId\": event[\"RequestId\"],\n \"LogicalResourceId\": event[\"LogicalResourceId\"],\n \"NoEcho\": False,\n }\n ).encode(\"utf-8\")\n headers = {\"content-type\": \"\", \"content-length\": str(len(response_body))}\n try:\n req = urllib.request.Request(url=event[\"ResponseURL\"], headers=headers, data=response_body, method=\"PUT\")\n with urllib.request.urlopen(req) as response:\n print(response.read().decode(\"utf-8\"))\n 
print(\"Status code: \" + response.reason)\n except Exception as e:\n print(\"send(..) failed executing request.urlopen(..): \" + str(e))\n" + }, + "Handler": "index.handler", + "Role": { + "Fn::GetAtt": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC", + "Arn" + ] + }, + "Runtime": "python3.7", + "Timeout": 300 + }, + "DependsOn": [ + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleDefaultPolicy2CF63D36", + "BucketNotificationsHandler050a0587b7544547bf325f094a3db834RoleB6FB88EC" + ] + } + } +} diff --git a/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts new file mode 100644 index 0000000000000..3180b491f6250 --- /dev/null +++ b/packages/@aws-cdk/aws-s3/test/integ.bucket.notifications.ts @@ -0,0 +1,14 @@ +#!/usr/bin/env node +import * as cdk from '@aws-cdk/core'; +import * as s3 from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-s3-notifications'); + +new s3.Bucket(stack, 'MyEventBridgeBucket', { + eventBridgeEnabled: true, + removalPolicy: cdk.RemovalPolicy.DESTROY, +}); + +app.synth(); diff --git a/packages/@aws-cdk/aws-s3/test/notification.test.ts b/packages/@aws-cdk/aws-s3/test/notification.test.ts index fbc8e1aa45a49..e3e1d81687100 100644 --- a/packages/@aws-cdk/aws-s3/test/notification.test.ts +++ b/packages/@aws-cdk/aws-s3/test/notification.test.ts @@ -122,4 +122,22 @@ describe('notification', () => { }), }, { suffix: '.png' }, { suffix: '.zip' })).toThrow(/suffix rule/); }); + + test('EventBridge notification custom resource', () => { + // GIVEN + const stack = new cdk.Stack(); + + // WHEN + new s3.Bucket(stack, 'MyBucket', { + eventBridgeEnabled: true, + }); + + // THEN + Template.fromStack(stack).resourceCountIs('AWS::S3::Bucket', 1); + Template.fromStack(stack).hasResourceProperties('Custom::S3BucketNotifications', { + NotificationConfiguration: { + EventBridgeConfiguration: {}, + }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py b/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py index 9127677b02675..ff79da80ef669 100644 --- a/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py +++ b/packages/@aws-cdk/aws-s3/test/notifications-resource-handler/test_index.py @@ -19,6 +19,8 @@ ) sys.exit(1) +EVENTBRIDGE_CONFIGURATION = 'EventBridgeConfiguration' + CONFIGURATION_TYPES = ["TopicConfigurations", "QueueConfigurations", "LambdaFunctionConfigurations"] @@ -33,6 +35,16 @@ def make_event(request_type: str, managed: bool): }, } +def make_event_with_eventbridge(request_type: str, managed: bool): + return { + "StackId": "StackId", + "RequestType": request_type, + "ResourceProperties": { + "Managed": str(managed), + "BucketName": "BucketName", + "NotificationConfiguration": make_notification_configuration_with_eventbridge(), + }, + } def make_notification_configuration(id_prefix: str = None): def make_id(): @@ -43,6 +55,11 @@ def make_id(): config[t] = [{"Id": make_id()}] return config +def make_notification_configuration_with_eventbridge(id_prefix: str = None): + return {**make_notification_configuration(id_prefix), **make_eventbridge_configuration()} + +def make_eventbridge_configuration(): + return { EVENTBRIDGE_CONFIGURATION: {} } def make_empty_notification_configuration(): config = {} @@ -50,11 +67,21 @@ def make_empty_notification_configuration(): config[t] = [] return config +def 
make_empty_notification_configuration_with_eventbridge(): + return {**make_empty_notification_configuration(), **make_eventbridge_configuration()} + def merge_notification_configurations(conf1: Dict, conf2: Dict): notifications = {} for t in CONFIGURATION_TYPES: notifications[t] = conf1.get(t, []) + conf2.get(t, []) + + if EVENTBRIDGE_CONFIGURATION in conf1: + notifications[EVENTBRIDGE_CONFIGURATION] = conf1[EVENTBRIDGE_CONFIGURATION] + + if EVENTBRIDGE_CONFIGURATION in conf2: + notifications[EVENTBRIDGE_CONFIGURATION] = conf2[EVENTBRIDGE_CONFIGURATION] + return notifications @@ -113,6 +140,22 @@ def test_create(self, _, get: MagicMock, put: MagicMock): event["ResourceProperties"]["NotificationConfiguration"], ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + get.return_value = {} + + event = make_event_with_eventbridge("Create", False) + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + event["ResourceProperties"]["NotificationConfiguration"], + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -131,6 +174,46 @@ def test_update(self, _, get: MagicMock, put: MagicMock): event["ResourceProperties"]["NotificationConfiguration"], ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Update", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration(f"{event['StackId']}-") + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + event["ResourceProperties"]["NotificationConfiguration"], + ) + + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Update", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration_with_eventbridge(f"{event['StackId']}-") + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + make_eventbridge_configuration(), + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -149,6 +232,24 @@ def test_delete(self, _, get: MagicMock, put: MagicMock): make_empty_notification_configuration(), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_delete_with_eventbridge_should_not_remove_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Delete", False) + + # simulate a previous create operation + current_notifications = make_notification_configuration_with_eventbridge(f"{event['StackId']}-") + get.return_value = 
current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + make_empty_notification_configuration_with_eventbridge(), + ) + class UnmanagedDirtyBucketTest(unittest.TestCase): @patch("index.put_bucket_notification_configuration") @@ -172,6 +273,48 @@ def test_create(self, _, get: MagicMock, put: MagicMock): ), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Create", False) + + # simulate external notifications + current_notifications = make_notification_configuration() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_create_with_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Create", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -193,6 +336,48 @@ def test_update(self, _, get: MagicMock, put: MagicMock): ), ) + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_with_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Update", False) + + # simulate external notifications + current_notifications = make_notification_configuration() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + + @patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_update_without_eventbridge_should_not_remove_existing_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event("Update", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + merge_notification_configurations( + current_notifications, + event["ResourceProperties"]["NotificationConfiguration"], + ), + ) + @patch("index.put_bucket_notification_configuration") @patch("index.get_bucket_notification_configuration") @patch("index.submit_response") @@ -211,6 +396,24 @@ def test_delete(self, _, get: MagicMock, put: MagicMock): current_notifications, ) + 
@patch("index.put_bucket_notification_configuration") + @patch("index.get_bucket_notification_configuration") + @patch("index.submit_response") + def test_delete_with_eventbridge_should_not_remove_eventbridge(self, _, get: MagicMock, put: MagicMock): + + event = make_event_with_eventbridge("Delete", False) + + # simulate external notifications + current_notifications = make_notification_configuration_with_eventbridge() + get.return_value = current_notifications + + index.handler(event, {}) + + put.assert_called_once_with( + event["ResourceProperties"]["BucketName"], + current_notifications, + ) + class CfnResponsesTest(unittest.TestCase): @patch("index.put_bucket_notification_configuration") From 304f5b6974f1121a8a5ff802076dffe2eff9f407 Mon Sep 17 00:00:00 2001 From: Choryu Park Date: Mon, 10 Jan 2022 22:00:31 +0900 Subject: [PATCH 028/374] feat(eks): cluster tagging (#4995) (#18109) Fixes #4995 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-eks/lib/cluster-resource.ts | 2 ++ packages/@aws-cdk/aws-eks/lib/cluster.ts | 9 ++++++++- .../aws-eks/test/integ.eks-cluster.expected.json | 3 +++ packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts | 3 +++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts index ed0852338a527..6a947380e3dd1 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster-resource.ts @@ -28,6 +28,7 @@ export interface ClusterResourceProps { readonly secretsEncryptionKey?: kms.IKey; readonly onEventLayer?: lambda.ILayerVersion; readonly clusterHandlerSecurityGroup?: ec2.ISecurityGroup; + readonly tags?: { [key: string]: string }; } /** @@ -89,6 +90,7 @@ export class ClusterResource extends CoreConstruct { endpointPrivateAccess: props.endpointPrivateAccess, publicAccessCidrs: props.publicAccessCidrs, }, + tags: props.tags, }, AssumeRoleArn: this.adminRole.roleArn, diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts index 2b917f8f93f7e..10a3f2123aac1 100644 --- a/packages/@aws-cdk/aws-eks/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts @@ -744,13 +744,19 @@ export interface ClusterProps extends ClusterOptions { */ readonly defaultCapacityType?: DefaultCapacityType; - /** * The IAM role to pass to the Kubectl Lambda Handler. * * @default - Default Lambda IAM Execution Role */ readonly kubectlLambdaRole?: iam.IRole; + + /** + * The tags assigned to the EKS cluster + * + * @default - none + */ + readonly tags?: { [key: string]: string }; } /** @@ -1372,6 +1378,7 @@ export class Cluster extends ClusterBase { subnets: placeClusterHandlerInVpc ? 
privateSubnets : undefined,
       clusterHandlerSecurityGroup: this.clusterHandlerSecurityGroup,
       onEventLayer: this.onEventLayer,
+      tags: props.tags,
     });
 
     if (this.endpointAccess._config.privateAccess && privateSubnets.length !== 0) {
diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json
index 00ab6f9f6fe3c..7b5af8f848f2e 100644
--- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json
+++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.expected.json
@@ -951,6 +951,9 @@
           ],
           "endpointPublicAccess": true,
           "endpointPrivateAccess": true
+        },
+        "tags": {
+          "foo": "bar"
         }
       },
       "AssumeRoleArn": {
diff --git a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts
index 5cbc0e384eb17..8dd012b3e2d66 100644
--- a/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts
+++ b/packages/@aws-cdk/aws-eks/test/integ.eks-cluster.ts
@@ -38,6 +38,9 @@ class EksClusterStack extends TestStack {
       defaultCapacity: 2,
       version: eks.KubernetesVersion.V1_21,
       secretsEncryptionKey,
+      tags: {
+        foo: 'bar',
+      },
     });
 
     this.assertFargateProfile();

From d94d9cec69a14006398c70a8c152bc21324d3d44 Mon Sep 17 00:00:00 2001
From: Rico Huijbers
Date: Mon, 10 Jan 2022 15:28:22 +0100
Subject: [PATCH 029/374] docs(core): errors will fail deployment, not synthesis (#18342)

Fix inaccurate information in the documentation.

Closes #18317

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 packages/@aws-cdk/core/lib/annotations.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/@aws-cdk/core/lib/annotations.ts b/packages/@aws-cdk/core/lib/annotations.ts
index f46c830c25757..03fb7a99bd80a 100644
--- a/packages/@aws-cdk/core/lib/annotations.ts
+++ b/packages/@aws-cdk/core/lib/annotations.ts
@@ -44,7 +44,7 @@ export class Annotations {
 
   /**
    * Adds an { "error": <message> } metadata entry to this construct.
-   * The toolkit will fail synthesis when errors are reported.
+   * The toolkit will fail deployment of any stack that has errors reported against it.
    * @param message The error message.
    */
   public addError(message: string) {

From 225668050caef9bfdaa25b8ae984d3886108397f Mon Sep 17 00:00:00 2001
From: Cory Hall <43035978+corymhall@users.noreply.github.com>
Date: Mon, 10 Jan 2022 10:16:19 -0500
Subject: [PATCH 030/374] feat(cli): diff now uses the lookup Role for new-style synthesis (#18277)

This PR exposes information on the bootstrap lookup role on the CloudFormation stack artifact. This enables the CLI to assume the lookup role during CLI operations in order to look up information in the stack account.

Along with the ARN of the lookup role, this also exposes a `requiresBootstrapStackVersion` property which is set to `8` (the version the lookup role was given ReadOnlyAccess), and the `bootstrapStackVersionSsmParameter` which is needed to look up the bootstrap version if a user has renamed the bootstrap stack. This allows us to first check whether the lookupRole exists and has the correct permissions prior to using it.

This also updates the `diff` capability in the CLI (run as part of `cdk diff` or `cdk deploy`) to use this new functionality. It will now try to assume the lookupRole; if it doesn't exist, or if the bootstrap stack version is not valid, it will fall back to using the deployRole (what it uses currently).
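To make the fallback concrete, here is a minimal sketch of the flow just described, written against the APIs this patch adds. The `sdkForDiff` helper and its exact wiring are illustrative assumptions, not the literal code in `cloudformation-deployments.ts`:

```ts
import * as cxapi from '@aws-cdk/cx-api';
import { ISDK, Mode, SdkProvider } from './aws-auth';

// Sketch: prefer the bootstrapped (read-only) lookup role for a diff,
// and fall back to the deploy role when it cannot be assumed.
async function sdkForDiff(sdkProvider: SdkProvider, stack: cxapi.CloudFormationStackArtifact): Promise<ISDK> {
  const resolvedEnvironment = await sdkProvider.resolveEnvironment(stack.environment);

  if (stack.lookupRole) {
    try {
      // Attempt to assume the lookup role (requires bootstrap stack version >= 8).
      const lookup = await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForReading, {
        assumeRoleArn: stack.lookupRole.arn,
        assumeRoleExternalId: stack.lookupRole.assumeRoleExternalId,
      });
      if (lookup.didAssumeRole) {
        // A real implementation would also verify the bootstrap stack version here,
        // using stack.lookupRole.requiresBootstrapStackVersion and the SSM parameter.
        return lookup.sdk;
      }
    } catch (e) {
      // Lookup role missing or not assumable: fall through to the deploy role.
    }
  }

  // Fallback: use the deploy role credentials (the pre-existing behavior).
  return (await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForReading, {
    assumeRoleArn: stack.assumeRoleArn,
  })).sdk;
}
```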
This PR also updates the `forEnvironment` function to return whether or not it is returning the default credentials. This allows the calling function to decide whether or not it actually wants to use the default credentials.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 .../lib/cloud-assembly/artifact-schema.ts     |  40 +++
 .../schema/cloud-assembly.schema.json         |  31 ++-
 .../schema/cloud-assembly.version.json        |   2 +-
 .../stack-synthesizers/default-synthesizer.ts |  33 +++
 .../stack-synthesizers/stack-synthesizer.ts   |   8 +
 .../lib/artifacts/cloudformation-artifact.ts  |   8 +
 .../aws-cdk/lib/api/aws-auth/sdk-provider.ts  |  39 ++-
 .../lib/api/bootstrap/deploy-bootstrap.ts     |   2 +-
 .../lib/api/cloudformation-deployments.ts     | 127 +++++++++-
 .../aws-cdk/lib/api/hotswap-deployments.ts    |   2 +-
 packages/aws-cdk/lib/api/toolkit-info.ts      |  39 ++-
 packages/aws-cdk/lib/context-providers/ami.ts |   2 +-
 .../context-providers/availability-zones.ts   |   2 +-
 .../endpoint-service-availability-zones.ts    |   2 +-
 .../lib/context-providers/hosted-zones.ts     |   2 +-
 .../aws-cdk/lib/context-providers/keys.ts     |   4 +-
 .../lib/context-providers/load-balancers.ts   |   4 +-
 .../lib/context-providers/security-groups.ts  |   2 +-
 .../lib/context-providers/ssm-parameters.ts   |   2 +-
 .../aws-cdk/lib/context-providers/vpcs.ts     |   2 +-
 packages/aws-cdk/lib/util/asset-publishing.ts |   4 +-
 .../api/cloudformation-deployments.test.ts    |   2 +-
 .../aws-cdk/test/api/sdk-provider.test.ts     |  22 +-
 packages/aws-cdk/test/cdk-toolkit.test.ts     | 237 ++++++++++++++++++
 .../context-providers/load-balancers.test.ts  |   6 +-
 .../aws-cdk/test/util/cloudformation.test.ts  |   2 +-
 packages/aws-cdk/test/util/mock-sdk.ts        |   6 +-
 .../aws-cdk/test/util/mock-toolkitinfo.ts     |   4 +-
 28 files changed, 569 insertions(+), 67 deletions(-)

diff --git a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts
index 9bf124c31c71d..4d98b3a29bb32 100644
--- a/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts
+++ b/packages/@aws-cdk/cloud-assembly-schema/lib/cloud-assembly/artifact-schema.ts
@@ -1,4 +1,37 @@
+/**
+ * Information needed to access an IAM role created
+ * as part of the bootstrap process
+ */
+export interface BootstrapRole {
+  /**
+   * The ARN of the IAM role created as part of bootstrapping
+   * e.g. lookupRoleArn
+   */
+  readonly arn: string;
+
+  /**
+   * External ID to use when assuming the bootstrap role
+   *
+   * @default - No external ID
+   */
+  readonly assumeRoleExternalId?: string;
+
+  /**
+   * Version of bootstrap stack required to use this role
+   *
+   * @default - No bootstrap stack required
+   */
+  readonly requiresBootstrapStackVersion?: number;
+
+  /**
+   * Name of SSM parameter with bootstrap stack version
+   *
+   * @default - Discover SSM parameter by reading stack
+   */
+  readonly bootstrapStackVersionSsmParameter?: string;
+}
+
 /**
  * Artifact properties for CloudFormation stacks.
 */
@@ -56,6 +89,13 @@ export interface AwsCloudFormationStackProperties {
    */
   readonly cloudFormationExecutionRoleArn?: string;
 
+  /**
+   * The role to use to look up values from the target AWS account
+   *
+   * @default - No role is assumed (current credentials are used)
+   */
+  readonly lookupRole?: BootstrapRole;
+
   /**
    * If the stack template has already been included in the asset manifest, its asset URL
    *
diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json
index 9241ae62ef0ff..5fe1f4fb4321a 100644
--- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json
+++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.schema.json
@@ -307,6 +307,10 @@
           "description": "The role that is passed to CloudFormation to execute the change set (Default - No role is passed (currently assumed role/credentials are used))",
           "type": "string"
         },
+        "lookupRole": {
+          "description": "The role to use to look up values from the target AWS account (Default - No role is assumed (current credentials are used))",
+          "$ref": "#/definitions/BootstrapRole"
+        },
         "stackTemplateAssetObjectUrl": {
           "description": "If the stack template has already been included in the asset manifest, its asset URL (Default - Not uploaded yet, upload just before deploying)",
           "type": "string"
@@ -328,6 +332,31 @@
         "templateFile"
       ]
     },
+    "BootstrapRole": {
+      "description": "Information needed to access an IAM role created\nas part of the bootstrap process",
+      "type": "object",
+      "properties": {
+        "arn": {
+          "description": "The ARN of the IAM role created as part of bootstrapping\ne.g. lookupRoleArn",
+          "type": "string"
+        },
+        "assumeRoleExternalId": {
+          "description": "External ID to use when assuming the bootstrap role (Default - No external ID)",
+          "type": "string"
+        },
+        "requiresBootstrapStackVersion": {
+          "description": "Version of bootstrap stack required to use this role (Default - No bootstrap stack required)",
+          "type": "number"
+        },
+        "bootstrapStackVersionSsmParameter": {
+          "description": "Name of SSM parameter with bootstrap stack version (Default - Discover SSM parameter by reading stack)",
+          "type": "string"
+        }
+      },
+      "required": [
+        "arn"
+      ]
+    },
     "AssetManifestProperties": {
       "description": "Artifact properties for the Asset Manifest",
       "type": "object",
@@ -598,7 +627,7 @@
       }
     },
     "returnAsymmetricSubnets": {
-      "description": "Whether to populate the subnetGroups field of the {@link VpcContextResponse},\nwhich contains potentially asymmetric subnet groups.",
+      "description": "Whether to populate the subnetGroups field of the{@linkVpcContextResponse},\nwhich contains potentially asymmetric subnet groups.",
       "default": false,
       "type": "boolean"
     },
diff --git a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json
index 01d4f111912e9..5bdbc9d33c3b3 100644
--- a/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json
+++ b/packages/@aws-cdk/cloud-assembly-schema/schema/cloud-assembly.version.json
@@ -1 +1 @@
-{"version":"15.0.0"}
\ No newline at end of file
+{"version":"16.0.0"}
\ No newline at end of file
diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts
index d8e1f8818abc4..ed537e496128a 100644
--- a/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts
+++ 
b/packages/@aws-cdk/core/lib/stack-synthesizers/default-synthesizer.ts @@ -21,6 +21,12 @@ export const BOOTSTRAP_QUALIFIER_CONTEXT = '@aws-cdk/core:bootstrapQualifier'; */ const MIN_BOOTSTRAP_STACK_VERSION = 6; +/** + * The minimum bootstrap stack version required + * to use the lookup role. + */ +const MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION = 8; + /** * Configuration properties for DefaultStackSynthesizer */ @@ -91,6 +97,25 @@ export interface DefaultStackSynthesizerProps { */ readonly lookupRoleArn?: string; + /** + * External ID to use when assuming lookup role + * + * @default - No external ID + */ + readonly lookupRoleExternalId?: string; + + /** + * Use the bootstrapped lookup role for (read-only) stack operations + * + * Use the lookup role when performing a `cdk diff`. If set to `false`, the + * `deploy role` credentials will be used to perform a `cdk diff`. + * + * Requires bootstrap stack version 8. + * + * @default true + */ + readonly useLookupRoleForStackOperations?: boolean; + /** * External ID to use when assuming role for image asset publishing * @@ -269,6 +294,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { private fileAssetPublishingRoleArn?: string; private imageAssetPublishingRoleArn?: string; private lookupRoleArn?: string; + private useLookupRoleForStackOperations: boolean; private qualifier?: string; private bucketPrefix?: string; private dockerTagPrefix?: string; @@ -279,6 +305,7 @@ export class DefaultStackSynthesizer extends StackSynthesizer { constructor(private readonly props: DefaultStackSynthesizerProps = {}) { super(); + this.useLookupRoleForStackOperations = props.useLookupRoleForStackOperations ?? true; for (const key in props) { if (props.hasOwnProperty(key)) { @@ -453,6 +480,12 @@ export class DefaultStackSynthesizer extends StackSynthesizer { requiresBootstrapStackVersion: MIN_BOOTSTRAP_STACK_VERSION, bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter, additionalDependencies: [artifactId], + lookupRole: this.useLookupRoleForStackOperations && this.lookupRoleArn ? 
{
+        arn: this.lookupRoleArn,
+        assumeRoleExternalId: this.props.lookupRoleExternalId,
+        requiresBootstrapStackVersion: MIN_LOOKUP_ROLE_BOOTSTRAP_STACK_VERSION,
+        bootstrapStackVersionSsmParameter: this.bootstrapStackVersionSsmParameter,
+      } : undefined,
     });
   }
 
diff --git a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts
index 3b283eaae24ce..ea7c7745f2419 100644
--- a/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts
+++ b/packages/@aws-cdk/core/lib/stack-synthesizers/stack-synthesizer.ts
@@ -1,3 +1,4 @@
+import * as cxschema from '@aws-cdk/cloud-assembly-schema';
 import { DockerImageAssetLocation, DockerImageAssetSource, FileAssetLocation, FileAssetSource } from '../assets';
 import { ISynthesisSession } from '../construct-compat';
 import { Stack } from '../stack';
@@ -100,6 +101,13 @@ export interface SynthesizeStackArtifactOptions {
    */
   readonly cloudFormationExecutionRoleArn?: string;
 
+  /**
+   * The role to use to look up values from the target AWS account
+   *
+   * @default - None
+   */
+  readonly lookupRole?: cxschema.BootstrapRole;
+
   /**
    * If the stack template has already been included in the asset manifest, its asset URL
    *
diff --git a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts
index 225f256e85f5f..66fc309a2593c 100644
--- a/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts
+++ b/packages/@aws-cdk/cx-api/lib/artifacts/cloudformation-artifact.ts
@@ -75,6 +75,13 @@ export class CloudFormationStackArtifact extends CloudArtifact {
    */
   public readonly cloudFormationExecutionRoleArn?: string;
 
+  /**
+   * The role to use to look up values from the target AWS account
+   *
+   * @default - No role is assumed (current credentials are used)
+   */
+  public readonly lookupRole?: cxschema.BootstrapRole;
+
   /**
    * If the stack template has already been included in the asset manifest, its asset URL
    *
@@ -135,6 +142,7 @@ export class CloudFormationStackArtifact extends CloudArtifact {
     this.bootstrapStackVersionSsmParameter = properties.bootstrapStackVersionSsmParameter;
     this.terminationProtection = properties.terminationProtection;
     this.validateOnSynth = properties.validateOnSynth;
+    this.lookupRole = properties.lookupRole;
 
     this.stackName = properties.stackName || artifactId;
     this.assets = this.findMetadataByType(cxschema.ArtifactMetadataEntryType.ASSET).map(e => e.data as cxschema.AssetMetadataEntry);
diff --git a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts
index 0da0b027bbc65..ad2af9f62ef61 100644
--- a/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts
+++ b/packages/aws-cdk/lib/api/aws-auth/sdk-provider.ts
@@ -77,6 +77,33 @@ export interface SdkHttpOptions {
 const CACHED_ACCOUNT = Symbol('cached_account');
 const CACHED_DEFAULT_CREDENTIALS = Symbol('cached_default_credentials');
 
+/**
+ * SDK configuration for a given environment
+ * 'forEnvironment' will attempt to assume a role and if it
+ * is not successful, then it will either:
+ *   1. Check to see if the default credentials (local credentials the CLI was executed with)
+ *      are for the given environment. If they are then return those.
+ *   2. If the default credentials are not for the given environment then
+ *      throw an error
+ *
+ * 'didAssumeRole' allows callers to know whether they are receiving the assume role
+ * credentials or the default credentials.
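+ *
+ * A minimal usage sketch (hypothetical caller, not part of this change):
+ *
+ *   const { sdk, didAssumeRole } = await sdkProvider.forEnvironment(env, Mode.ForReading, { assumeRoleArn });
+ *   if (!didAssumeRole) {
+ *     // 'sdk' carries the default credentials; the caller decides whether to proceed with them
+ *   }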
+ */ +export interface SdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * Whether or not the assume role was successful. + * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + /** * Creates instances of the AWS SDK appropriate for a given account/region. * @@ -140,7 +167,11 @@ export class SdkProvider { * * The `environment` parameter is resolved first (see `resolveEnvironment()`). */ - public async forEnvironment(environment: cxapi.Environment, mode: Mode, options?: CredentialsOptions): Promise { + public async forEnvironment( + environment: cxapi.Environment, + mode: Mode, + options?: CredentialsOptions, + ): Promise { const env = await this.resolveEnvironment(environment); const baseCreds = await this.obtainBaseCredentials(env.account, mode); @@ -151,7 +182,7 @@ export class SdkProvider { // account. if (options?.assumeRoleArn === undefined) { if (baseCreds.source === 'incorrectDefault') { throw new Error(fmtObtainCredentialsError(env.account, baseCreds)); } - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } // We will proceed to AssumeRole using whatever we've been given. @@ -161,7 +192,7 @@ export class SdkProvider { // we can determine whether the AssumeRole call succeeds or not. try { await sdk.forceCredentialRetrieval(); - return sdk; + return { sdk, didAssumeRole: true }; } catch (e) { // AssumeRole failed. Proceed and warn *if and only if* the baseCredentials were already for the right account // or returned from a plugin. This is to cover some current setups for people using plugins or preferring to @@ -170,7 +201,7 @@ export class SdkProvider { if (baseCreds.source === 'correctDefault' || baseCreds.source === 'plugin') { debug(e.message); warning(`${fmtObtainedCredentials(baseCreds)} could not be used to assume '${options.assumeRoleArn}', but are for the right account. Proceeding anyway.`); - return new SDK(baseCreds.credentials, env.region, this.sdkOptions); + return { sdk: new SDK(baseCreds.credentials, env.region, this.sdkOptions), didAssumeRole: false }; } throw e; diff --git a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts index 49f97e71332c3..8cece9d8eed30 100644 --- a/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts +++ b/packages/aws-cdk/lib/api/bootstrap/deploy-bootstrap.ts @@ -27,7 +27,7 @@ export class BootstrapStack { toolkitStackName = toolkitStackName ?? 
DEFAULT_TOOLKIT_STACK_NAME; const resolvedEnvironment = await sdkProvider.resolveEnvironment(environment); - const sdk = await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting); + const sdk = (await sdkProvider.forEnvironment(resolvedEnvironment, Mode.ForWriting)).sdk; const currentToolkitInfo = await ToolkitInfo.lookup(resolvedEnvironment, sdk, toolkitStackName); diff --git a/packages/aws-cdk/lib/api/cloudformation-deployments.ts b/packages/aws-cdk/lib/api/cloudformation-deployments.ts index fb7c5410faf3d..c461c5ac24dc5 100644 --- a/packages/aws-cdk/lib/api/cloudformation-deployments.ts +++ b/packages/aws-cdk/lib/api/cloudformation-deployments.ts @@ -1,9 +1,9 @@ import * as cxapi from '@aws-cdk/cx-api'; import { AssetManifest } from 'cdk-assets'; import { Tag } from '../cdk-toolkit'; -import { debug } from '../logging'; +import { debug, warning } from '../logging'; import { publishAssets } from '../util/asset-publishing'; -import { Mode, SdkProvider } from './aws-auth'; +import { Mode, SdkProvider, ISDK } from './aws-auth'; import { deployStack, DeployStackResult, destroyStack } from './deploy-stack'; import { ToolkitInfo } from './toolkit-info'; import { CloudFormationStack, Template } from './util/cloudformation'; @@ -171,6 +171,54 @@ export interface ProvisionerProps { sdkProvider: SdkProvider; } +/** + * SDK obtained by assuming the lookup role + * for a given environment + */ +export interface PreparedSdkWithLookupRoleForEnvironment { + /** + * The SDK for the given environment + */ + readonly sdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + + /** + * Whether or not the assume role was successful. + * If the assume role was not successful (false) + * then that means that the 'sdk' returned contains + * the default credentials (not the assume role credentials) + */ + readonly didAssumeRole: boolean; +} + +/** + * SDK obtained by assuming the deploy role + * for a given environment + */ +export interface PreparedSdkForEnvironment { + /** + * The SDK for the given environment + */ + readonly stackSdk: ISDK; + + /** + * The resolved environment for the stack + * (no more 'unknown-account/unknown-region') + */ + readonly resolvedEnvironment: cxapi.Environment; + /** + * The Execution Role that should be passed to CloudFormation. + * + * @default - no execution role is used + */ + readonly cloudFormationRoleArn?: string; +} + /** * Helper class for CloudFormation deployments * @@ -186,7 +234,19 @@ export class CloudFormationDeployments { public async readCurrentTemplate(stackArtifact: cxapi.CloudFormationStackArtifact): Promise