diff --git a/.changes/1.34.97.json b/.changes/1.34.97.json new file mode 100644 index 0000000000..149e4fb069 --- /dev/null +++ b/.changes/1.34.97.json @@ -0,0 +1,22 @@ +[ + { + "category": "``dynamodb``", + "description": "This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release includes a new API for retrieving the public endorsement key of the EC2 instance's Nitro Trusted Platform Module (NitroTPM).", + "type": "api-change" + }, + { + "category": "``personalize``", + "description": "This releases ability to delete users and their data, including their metadata and interactions data, from a dataset group.", + "type": "api-change" + }, + { + "category": "``redshift-serverless``", + "description": "Update Redshift Serverless List Scheduled Actions Output Response to include Namespace Name.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7015340546..d5a32a3335 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,15 @@ CHANGELOG ========= +1.34.97 +======= + +* api-change:``dynamodb``: This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs. +* api-change:``ec2``: This release includes a new API for retrieving the public endorsement key of the EC2 instance's Nitro Trusted Platform Module (NitroTPM). 
+* api-change:``personalize``: This releases ability to delete users and their data, including their metadata and interactions data, from a dataset group. +* api-change:``redshift-serverless``: Update Redshift Serverless List Scheduled Actions Output Response to include Namespace Name. + + 1.34.96 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 7efed47917..83eb8af87e 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.96' +__version__ = '1.34.97' class NullHandler(logging.Handler): diff --git a/botocore/data/dynamodb/2012-08-10/service-2.json b/botocore/data/dynamodb/2012-08-10/service-2.json index 713432d8d0..a8f3c338c9 100644 --- a/botocore/data/dynamodb/2012-08-10/service-2.json +++ b/botocore/data/dynamodb/2012-08-10/service-2.json @@ -513,7 +513,7 @@ {"shape":"InternalServerError"}, {"shape":"PolicyNotFoundException"} ], - "documentation":"

Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

GetResourcePolicy follows an eventually consistent model. The following list describes the outcomes when you issue the GetResourcePolicy request immediately after issuing another request:

Because GetResourcePolicy uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request.

After a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, you can assume the policy will start getting applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable request will always be applied to all requests for that table.

", + "documentation":"

Returns the resource-based policy document attached to the resource, which can be a table or stream, in JSON format.

GetResourcePolicy follows an eventually consistent model. The following list describes the outcomes when you issue the GetResourcePolicy request immediately after issuing another request:

Because GetResourcePolicy uses an eventually consistent query, the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then retry the GetResourcePolicy request.

After a GetResourcePolicy request returns a policy created using the PutResourcePolicy request, the policy will be applied in the authorization of requests to the resource. Because this process is eventually consistent, it will take some time to apply the policy to all requests to a resource. Policies that you attach while creating a table using the CreateTable request will always be applied to all requests for that table.

", "endpointdiscovery":{ } }, @@ -670,7 +670,7 @@ {"shape":"PolicyNotFoundException"}, {"shape":"ResourceInUseException"} ], - "documentation":"

Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is eventually consistent .

PutResourcePolicy is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId which doesn't match the current policy's RevisionId, the PolicyNotFoundException will be returned.

PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy request again.

", + "documentation":"

Attaches a resource-based policy document to the resource, which can be a table or stream. When you attach a resource-based policy using this API, the policy application is eventually consistent .

PutResourcePolicy is an idempotent operation; running it multiple times on the same resource using the same policy document will return the same revision ID. If you specify an ExpectedRevisionId that doesn't match the current policy's RevisionId, the PolicyNotFoundException will be returned.

PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy request immediately after a PutResourcePolicy request, DynamoDB might return your previous policy, if there was one, or return the PolicyNotFoundException. This is because GetResourcePolicy uses an eventually consistent query, and the metadata for your policy or table might not be available at that moment. Wait for a few seconds, and then try the GetResourcePolicy request again.

", "endpointdiscovery":{ } }, @@ -1971,6 +1971,10 @@ "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", "documentation":"

Represents the provisioned throughput settings for the specified global secondary index.

For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

The maximum number of read and write units for the global secondary index being created. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents a new global secondary index to be added to an existing table.

" @@ -2028,6 +2032,10 @@ "shape":"ProvisionedThroughputOverride", "documentation":"

Replica-specific provisioned throughput. If not specified, uses the source table's provisioned throughput settings.

" }, + "OnDemandThroughputOverride":{ + "shape":"OnDemandThroughputOverride", + "documentation":"

The maximum on-demand throughput settings for the specified replica table being created. You can only modify MaxReadRequestUnits, because you can't modify MaxWriteRequestUnits for individual replica tables.

" + }, "GlobalSecondaryIndexes":{ "shape":"ReplicaGlobalSecondaryIndexList", "documentation":"

Replica-specific global secondary index settings.

" @@ -2097,7 +2105,11 @@ }, "ResourcePolicy":{ "shape":"ResourcePolicy", - "documentation":"

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy creation is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. You can’t request an increase for this limit. For a full list of all considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations.

" + "documentation":"

An Amazon Web Services resource-based policy document in JSON format that will be attached to the table.

When you attach a resource-based policy while creating a table, the policy application is strongly consistent.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that apply for resource-based policies, see Resource-based policy considerations.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

Sets the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the input of a CreateTable operation.

" @@ -2330,7 +2342,7 @@ "members":{ "RevisionId":{ "shape":"PolicyRevisionId", - "documentation":"

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

This value will be empty if you make a request against a resource without a policy.

" + "documentation":"

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

This value will be empty if you make a request against a resource without a policy.

" } } }, @@ -3169,7 +3181,7 @@ }, "RevisionId":{ "shape":"PolicyRevisionId", - "documentation":"

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

" + "documentation":"

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

" } } }, @@ -3196,6 +3208,10 @@ "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", "documentation":"

Represents the provisioned throughput settings for the specified global secondary index.

For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the properties of a global secondary index.

" @@ -3254,6 +3270,10 @@ "IndexArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) that uniquely identifies the index.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

The maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the properties of a global secondary index.

" @@ -3280,7 +3300,8 @@ "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", "documentation":"

Represents the provisioned throughput settings for the specified global secondary index.

" - } + }, + "OnDemandThroughput":{"shape":"OnDemandThroughput"} }, "documentation":"

Represents the properties of a global secondary index for the table when the backup was created.

" }, @@ -4308,6 +4329,30 @@ "type":"list", "member":{"shape":"NumberAttributeValue"} }, + "OnDemandThroughput":{ + "type":"structure", + "members":{ + "MaxReadRequestUnits":{ + "shape":"LongObject", + "documentation":"

Maximum number of read request units for the specified table.

To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxReadRequestUnits to -1.

" + }, + "MaxWriteRequestUnits":{ + "shape":"LongObject", + "documentation":"

Maximum number of write request units for the specified table.

To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum OnDemandThroughput that is currently set on your table, set the value of MaxWriteRequestUnits to -1.

" + } + }, + "documentation":"

Sets the maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" + }, + "OnDemandThroughputOverride":{ + "type":"structure", + "members":{ + "MaxReadRequestUnits":{ + "shape":"LongObject", + "documentation":"

Maximum number of read request units for the specified replica table.

" + } + }, + "documentation":"

Overrides the on-demand throughput settings for this replica table. If you don't specify a value for this parameter, it uses the source table's on-demand throughput settings.

" + }, "ParameterizedStatement":{ "type":"structure", "required":["Statement"], @@ -4647,11 +4692,11 @@ }, "Policy":{ "shape":"ResourcePolicy", - "documentation":"

An Amazon Web Services resource-based policy document in JSON format.

The maximum size supported for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when calculating the size of a policy against this limit. For a full list of all considerations that you should keep in mind while attaching a resource-based policy, see Resource-based policy considerations.

" + "documentation":"

An Amazon Web Services resource-based policy document in JSON format.

For a full list of all considerations that apply while attaching a resource-based policy, see Resource-based policy considerations.

" }, "ExpectedRevisionId":{ "shape":"PolicyRevisionId", - "documentation":"

A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy. When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException.

To conditionally put a policy when no policy exists for the resource, specify NO_POLICY for the revision ID.

" + "documentation":"

A string value that you can use to conditionally update your policy. You can provide the revision ID of your existing policy to make mutating requests against that policy.

When you provide an expected revision ID, if the revision ID of the existing policy on the resource doesn't match or if there's no policy attached to the resource, your request will be rejected with a PolicyNotFoundException.

To conditionally attach a policy when no policy exists for the resource, specify NO_POLICY for the revision ID.

" }, "ConfirmRemoveSelfResourceAccess":{ "shape":"ConfirmRemoveSelfResourceAccess", @@ -4664,7 +4709,7 @@ "members":{ "RevisionId":{ "shape":"PolicyRevisionId", - "documentation":"

A unique string that represents the revision ID of the policy. If you are comparing revision IDs, make sure to always use string comparison logic.

" + "documentation":"

A unique string that represents the revision ID of the policy. If you're comparing revision IDs, make sure to always use string comparison logic.

" } } }, @@ -4857,6 +4902,10 @@ "shape":"ProvisionedThroughputOverride", "documentation":"

Replica-specific provisioned throughput. If not described, uses the source table's provisioned throughput settings.

" }, + "OnDemandThroughputOverride":{ + "shape":"OnDemandThroughputOverride", + "documentation":"

Overrides the maximum on-demand throughput settings for the specified replica table.

" + }, "GlobalSecondaryIndexes":{ "shape":"ReplicaGlobalSecondaryIndexDescriptionList", "documentation":"

Replica-specific global secondary index settings.

" @@ -4884,6 +4933,10 @@ "ProvisionedThroughputOverride":{ "shape":"ProvisionedThroughputOverride", "documentation":"

Replica table GSI-specific provisioned throughput. If not specified, uses the source table GSI's read capacity settings.

" + }, + "OnDemandThroughputOverride":{ + "shape":"OnDemandThroughputOverride", + "documentation":"

Overrides the maximum on-demand throughput settings for the specified global secondary index in the specified replica table.

" } }, "documentation":"

Represents the properties of a replica global secondary index.

" @@ -4933,6 +4986,10 @@ "ProvisionedThroughputOverride":{ "shape":"ProvisionedThroughputOverride", "documentation":"

If not described, uses the source table GSI's read capacity settings.

" + }, + "OnDemandThroughputOverride":{ + "shape":"OnDemandThroughputOverride", + "documentation":"

Overrides the maximum on-demand throughput for the specified global secondary index in the specified replica table.

" } }, "documentation":"

Represents the properties of a replica global secondary index.

" @@ -5244,6 +5301,7 @@ "shape":"ProvisionedThroughput", "documentation":"

Provisioned throughput settings for the restored table.

" }, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughput"}, "SSESpecificationOverride":{ "shape":"SSESpecification", "documentation":"

The new server-side encryption settings for the restored table.

" @@ -5299,6 +5357,7 @@ "shape":"ProvisionedThroughput", "documentation":"

Provisioned throughput settings for the restored table.

" }, + "OnDemandThroughputOverride":{"shape":"OnDemandThroughput"}, "SSESpecificationOverride":{ "shape":"SSESpecification", "documentation":"

The new server-side encryption settings for the restored table.

" @@ -5613,6 +5672,7 @@ "shape":"ProvisionedThroughput", "documentation":"

Read IOPs and Write IOPS on the table when the backup was created.

" }, + "OnDemandThroughput":{"shape":"OnDemandThroughput"}, "ItemCount":{ "shape":"ItemCount", "documentation":"

Number of items in the table. Note that this is an approximate value.

" @@ -5764,6 +5824,7 @@ "documentation":"

The billing mode for provisioning the table created as part of the import operation.

" }, "ProvisionedThroughput":{"shape":"ProvisionedThroughput"}, + "OnDemandThroughput":{"shape":"OnDemandThroughput"}, "SSESpecification":{"shape":"SSESpecification"}, "GlobalSecondaryIndexes":{ "shape":"GlobalSecondaryIndexList", @@ -5866,6 +5927,10 @@ "DeletionProtectionEnabled":{ "shape":"DeletionProtectionEnabled", "documentation":"

Indicates whether deletion protection is enabled (true) or disabled (false) on the table.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

The maximum number of read and write units for the specified on-demand table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the properties of a table.

" @@ -6270,10 +6335,7 @@ "UpdateExpression":{"type":"string"}, "UpdateGlobalSecondaryIndexAction":{ "type":"structure", - "required":[ - "IndexName", - "ProvisionedThroughput" - ], + "required":["IndexName"], "members":{ "IndexName":{ "shape":"IndexName", @@ -6282,6 +6344,10 @@ "ProvisionedThroughput":{ "shape":"ProvisionedThroughput", "documentation":"

Represents the provisioned throughput settings for the specified global secondary index.

For current minimum and maximum provisioned throughput values, see Service, Account, and Table Quotas in the Amazon DynamoDB Developer Guide.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

Updates the maximum number of read and write units for the specified global secondary index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the new provisioned throughput settings to be applied to a global secondary index.

" @@ -6500,6 +6566,10 @@ "shape":"ProvisionedThroughputOverride", "documentation":"

Replica-specific provisioned throughput. If not specified, uses the source table's provisioned throughput settings.

" }, + "OnDemandThroughputOverride":{ + "shape":"OnDemandThroughputOverride", + "documentation":"

Overrides the maximum on-demand throughput for the replica table.

" + }, "GlobalSecondaryIndexes":{ "shape":"ReplicaGlobalSecondaryIndexList", "documentation":"

Replica-specific global secondary index settings.

" @@ -6554,6 +6624,10 @@ "DeletionProtectionEnabled":{ "shape":"DeletionProtectionEnabled", "documentation":"

Indicates whether deletion protection is to be enabled (true) or disabled (false) on the table.

" + }, + "OnDemandThroughput":{ + "shape":"OnDemandThroughput", + "documentation":"

Updates the maximum number of read and write units for the specified table in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits, or both.

" } }, "documentation":"

Represents the input of an UpdateTable operation.

" diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index b5d96dea03..84b1554dbd 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -4407,6 +4407,16 @@ "output":{"shape":"GetInstanceMetadataDefaultsResult"}, "documentation":"

Gets the default instance metadata service (IMDS) settings that are set at the account level in the specified Amazon Web Services
 Region.

For more information, see Order of precedence for instance metadata options in the Amazon EC2 User Guide.

" }, + "GetInstanceTpmEkPub":{ + "name":"GetInstanceTpmEkPub", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetInstanceTpmEkPubRequest"}, + "output":{"shape":"GetInstanceTpmEkPubResult"}, + "documentation":"

Gets the public endorsement key associated with the Nitro Trusted Platform Module (NitroTPM) for the specified instance.

" + }, "GetInstanceTypesFromInstanceRequirements":{ "name":"GetInstanceTypesFromInstanceRequirements", "http":{ @@ -27420,6 +27430,24 @@ "locationName":"item" } }, + "EkPubKeyFormat":{ + "type":"string", + "enum":[ + "der", + "tpmt" + ] + }, + "EkPubKeyType":{ + "type":"string", + "enum":[ + "rsa-2048", + "ecc-sec-p384" + ] + }, + "EkPubKeyValue":{ + "type":"string", + "sensitive":true + }, "ElasticGpuAssociation":{ "type":"structure", "members":{ @@ -30838,6 +30866,57 @@ } } }, + "GetInstanceTpmEkPubRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "KeyType", + "KeyFormat" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The ID of the instance for which to get the public endorsement key.

" + }, + "KeyType":{ + "shape":"EkPubKeyType", + "documentation":"

The required public endorsement key type.

" + }, + "KeyFormat":{ + "shape":"EkPubKeyFormat", + "documentation":"

The required public endorsement key format. Specify der for a DER-encoded public key that is compatible with OpenSSL. Specify tpmt for a TPM 2.0 format that is compatible with tpm2-tools. The returned key is base64 encoded.

" + }, + "DryRun":{ + "shape":"Boolean", + "documentation":"

Specify this parameter to verify whether the request will succeed, without actually making the request. If the request will succeed, the response is DryRunOperation. Otherwise, the response is UnauthorizedOperation.

" + } + } + }, + "GetInstanceTpmEkPubResult":{ + "type":"structure", + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "documentation":"

The ID of the instance.

", + "locationName":"instanceId" + }, + "KeyType":{ + "shape":"EkPubKeyType", + "documentation":"

The public endorsement key type.

", + "locationName":"keyType" + }, + "KeyFormat":{ + "shape":"EkPubKeyFormat", + "documentation":"

The public endorsement key format.

", + "locationName":"keyFormat" + }, + "KeyValue":{ + "shape":"EkPubKeyValue", + "documentation":"

The public endorsement key material.

", + "locationName":"keyValue" + } + } + }, "GetInstanceTypesFromInstanceRequirementsRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/personalize/2018-05-22/service-2.json b/botocore/data/personalize/2018-05-22/service-2.json index e152ed32d5..6e54714268 100644 --- a/botocore/data/personalize/2018-05-22/service-2.json +++ b/botocore/data/personalize/2018-05-22/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"personalize", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceFullName":"Amazon Personalize", "serviceId":"Personalize", "signatureVersion":"v4", @@ -68,6 +69,24 @@ "documentation":"

You incur campaign costs while it is active. To avoid unnecessary costs, make sure to delete the campaign when you are finished. For information about campaign costs, see Amazon Personalize pricing.

Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A high minProvisionedTPS will increase your cost. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

When you create an Amazon Personalize campaign, you can specify the minimum provisioned transactions per second (minProvisionedTPS) for the campaign. This is the baseline transaction throughput for the campaign provisioned by Amazon Personalize. It sets the minimum billing charge for the campaign while it is active. A transaction is a single GetRecommendations or GetPersonalizedRanking request. The default minProvisionedTPS is 1.

If your TPS increases beyond the minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions. When your traffic reduces, capacity returns to the minProvisionedTPS.

You are charged for the minimum provisioned TPS or, if your requests exceed the minProvisionedTPS, the actual TPS. The actual TPS is the total number of recommendation requests you make. We recommend starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

For more information about campaign costs, see Amazon Personalize pricing.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", "idempotent":true }, + "CreateDataDeletionJob":{ + "name":"CreateDataDeletionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDataDeletionJobRequest"}, + "output":{"shape":"CreateDataDeletionJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"TooManyTagsException"} + ], + "documentation":"

Creates a batch job that deletes all references to specific users from an Amazon Personalize dataset group in batches. You specify the users to delete in a CSV file of userIds in an Amazon S3 bucket. After a job completes, Amazon Personalize no longer trains on the users’ data and no longer considers the users when generating user segments. For more information about creating a data deletion job, see Deleting users.

After you create a job, it can take up to a day to delete all references to the users from datasets and models. Until the job completes, Amazon Personalize continues to use the data when training. And if you use a User Segmentation recipe, the users might appear in user segments.

Status

A data deletion job can have one of the following statuses:

To get the status of the data deletion job, call DescribeDataDeletionJob API operation and specify the Amazon Resource Name (ARN) of the job. If the status is FAILED, the response includes a failureReason key, which describes why the job failed.

Related APIs

" + }, "CreateDataset":{ "name":"CreateDataset", "http":{ @@ -458,6 +477,21 @@ "documentation":"

Describes the given campaign, including its status.

A campaign can be in one of the following states:

When the status is CREATE FAILED, the response includes the failureReason key, which describes why.

For more information on campaigns, see CreateCampaign.

", "idempotent":true }, + "DescribeDataDeletionJob":{ + "name":"DescribeDataDeletionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDataDeletionJobRequest"}, + "output":{"shape":"DescribeDataDeletionJobResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Describes the data deletion job created by CreateDataDeletionJob, including the job status.

", + "idempotent":true + }, "DescribeDataset":{ "name":"DescribeDataset", "http":{ @@ -712,6 +746,21 @@ "documentation":"

Returns a list of campaigns that use the given solution. When a solution is not specified, all the campaigns associated with the account are listed. The response provides the properties for each campaign, including the Amazon Resource Name (ARN). For more information on campaigns, see CreateCampaign.

", "idempotent":true }, + "ListDataDeletionJobs":{ + "name":"ListDataDeletionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDataDeletionJobsRequest"}, + "output":{"shape":"ListDataDeletionJobsResponse"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"InvalidNextTokenException"} + ], + "documentation":"

Returns a list of data deletion jobs for a dataset group ordered by creation time, with the most recent first. When a dataset group is not specified, all the data deletion jobs associated with the account are listed. The response provides the properties for each job, including the Amazon Resource Name (ARN). For more information on data deletion jobs, see Deleting users.

", + "idempotent":true + }, "ListDatasetExportJobs":{ "name":"ListDatasetExportJobs", "http":{ @@ -1789,6 +1838,46 @@ } } }, + "CreateDataDeletionJobRequest":{ + "type":"structure", + "required":[ + "jobName", + "datasetGroupArn", + "dataSource", + "roleArn" + ], + "members":{ + "jobName":{ + "shape":"Name", + "documentation":"

The name for the data deletion job.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group that has the datasets you want to delete records from.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

The Amazon S3 bucket that contains the list of userIds of the users to delete.

" + }, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that has permissions to read from the Amazon S3 data source.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

A list of tags to apply to the data deletion job.

" + } + } + }, + "CreateDataDeletionJobResponse":{ + "type":"structure", + "members":{ + "dataDeletionJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the data deletion job.

" + } + } + }, "CreateDatasetExportJobRequest":{ "type":"structure", "required":[ @@ -2219,15 +2308,97 @@ } } }, + "DataDeletionJob":{ + "type":"structure", + "members":{ + "jobName":{ + "shape":"Name", + "documentation":"

The name of the data deletion job.

" + }, + "dataDeletionJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the data deletion job.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group the job deletes records from.

" + }, + "dataSource":{"shape":"DataSource"}, + "roleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that has permissions to read from the Amazon S3 data source.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the data deletion job.

A data deletion job can have one of the following statuses:

" + }, + "numDeleted":{ + "shape":"Integer", + "documentation":"

The number of records deleted by a COMPLETED job.

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The creation date and time (in Unix time) of the data deletion job.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The date and time (in Unix time) the data deletion job was last updated.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If a data deletion job fails, provides the reason why.

" + } + }, + "documentation":"

Describes a job that deletes all references to specific users from an Amazon Personalize dataset group in batches. For information about creating a data deletion job, see Deleting users.

" + }, + "DataDeletionJobSummary":{ + "type":"structure", + "members":{ + "dataDeletionJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the data deletion job.

" + }, + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group the job deleted records from.

" + }, + "jobName":{ + "shape":"Name", + "documentation":"

The name of the data deletion job.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the data deletion job.

A data deletion job can have one of the following statuses:

" + }, + "creationDateTime":{ + "shape":"Date", + "documentation":"

The creation date and time (in Unix time) of the data deletion job.

" + }, + "lastUpdatedDateTime":{ + "shape":"Date", + "documentation":"

The date and time (in Unix time) the data deletion job was last updated.

" + }, + "failureReason":{ + "shape":"FailureReason", + "documentation":"

If a data deletion job fails, provides the reason why.

" + } + }, + "documentation":"

Provides a summary of the properties of a data deletion job. For a complete listing, call the DescribeDataDeletionJob API operation.

" + }, + "DataDeletionJobs":{ + "type":"list", + "member":{"shape":"DataDeletionJobSummary"}, + "max":100 + }, "DataSource":{ "type":"structure", "members":{ "dataLocation":{ "shape":"S3Location", - "documentation":"

The path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For example:

s3://bucket-name/folder-name/

" + "documentation":"

For dataset import jobs, the path to the Amazon S3 bucket where the data that you want to upload to your dataset is stored. For data deletion jobs, the path to the Amazon S3 bucket that stores the list of records to delete.

For example:

s3://bucket-name/folder-name/fileName.csv

If your CSV files are in a folder in your Amazon S3 bucket and you want your import job or data deletion job to consider multiple files, you can specify the path to the folder. With a data deletion job, Amazon Personalize uses all files in the folder and any subfolder. Use the following syntax with a / after the folder name:

s3://bucket-name/folder-name/

" } }, - "documentation":"

Describes the data source that contains the data to upload to a dataset.

" + "documentation":"

Describes the data source that contains the data to upload to a dataset, or the list of records to delete from Amazon Personalize.

" }, "Dataset":{ "type":"structure", @@ -2917,6 +3088,25 @@ } } }, + "DescribeDataDeletionJobRequest":{ + "type":"structure", + "required":["dataDeletionJobArn"], + "members":{ + "dataDeletionJobArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the data deletion job.

" + } + } + }, + "DescribeDataDeletionJobResponse":{ + "type":"structure", + "members":{ + "dataDeletionJob":{ + "shape":"DataDeletionJob", + "documentation":"

Information about the data deletion job, including the status.

The status is one of the following values:

" + } + } + }, "DescribeDatasetExportJobRequest":{ "type":"structure", "required":["datasetExportJobArn"], @@ -3517,6 +3707,7 @@ "ALL" ] }, + "Integer":{"type":"integer"}, "IntegerHyperParameterRange":{ "type":"structure", "members":{ @@ -3672,6 +3863,36 @@ } } }, + "ListDataDeletionJobsRequest":{ + "type":"structure", + "members":{ + "datasetGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset group to list data deletion jobs for.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token returned from the previous call to ListDataDeletionJobs for getting the next set of jobs (if they exist).

" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of data deletion jobs to return.

" + } + } + }, + "ListDataDeletionJobsResponse":{ + "type":"structure", + "members":{ + "dataDeletionJobs":{ + "shape":"DataDeletionJobs", + "documentation":"

The list of data deletion jobs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A token for getting the next set of data deletion jobs (if they exist).

" + } + } + }, "ListDatasetExportJobsRequest":{ "type":"structure", "members":{ diff --git a/botocore/data/redshift-serverless/2021-04-21/service-2.json b/botocore/data/redshift-serverless/2021-04-21/service-2.json index 8dc9f0454c..170fcd5185 100644 --- a/botocore/data/redshift-serverless/2021-04-21/service-2.json +++ b/botocore/data/redshift-serverless/2021-04-21/service-2.json @@ -975,7 +975,7 @@ "members":{ "parameterKey":{ "shape":"ParameterKey", - "documentation":"

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "parameterValue":{ "shape":"ParameterValue", @@ -1377,7 +1377,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "enhancedVpcRouting":{ "shape":"Boolean", @@ -2203,7 +2203,7 @@ }, "scheduledActions":{ "shape":"ScheduledActionsList", - "documentation":"

All of the returned scheduled action objects.

" + "documentation":"

All of the returned scheduled action association objects.

" } } }, @@ -2888,6 +2888,20 @@ "documentation":"

The schedule of when Amazon Redshift Serverless should run the scheduled action.

", "union":true }, + "ScheduledActionAssociation":{ + "type":"structure", + "members":{ + "namespaceName":{ + "shape":"NamespaceName", + "documentation":"

Name of associated Amazon Redshift Serverless namespace.

" + }, + "scheduledActionName":{ + "shape":"ScheduledActionName", + "documentation":"

Name of associated scheduled action.

" + } + }, + "documentation":"

Contains names of objects associated with a scheduled action.

" + }, "ScheduledActionName":{ "type":"string", "max":60, @@ -2943,7 +2957,7 @@ }, "ScheduledActionsList":{ "type":"list", - "member":{"shape":"ScheduledActionName"} + "member":{"shape":"ScheduledActionAssociation"} }, "SecurityGroupId":{"type":"string"}, "SecurityGroupIdList":{ @@ -3561,7 +3575,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "enhancedVpcRouting":{ "shape":"Boolean", @@ -3733,7 +3747,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "creationDate":{ "shape":"SyntheticTimestamp_date_time", @@ -3781,7 +3795,7 @@ }, "publiclyAccessible":{ "shape":"Boolean", - "documentation":"

A value that specifies whether the workgroup can be accessible from a public network

" + "documentation":"

A value that specifies whether the workgroup can be accessible from a public network.

" }, "securityGroupIds":{ "shape":"SecurityGroupIdList", diff --git a/docs/source/conf.py b/docs/source/conf.py index f3a2bfd172..7e7d123a59 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.34.' # The full version, including alpha/beta/rc tags. -release = '1.34.96' +release = '1.34.97' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.