diff --git a/CHANGELOG.md b/CHANGELOG.md index 55aa959af4..c98471fafa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +Release v1.44.42 (2022-06-24) +=== + +### Service Client Updates +* `service/glue`: Updates service API and documentation + * This release enables the new ListCrawls API for viewing the AWS Glue Crawler run history. +* `service/rds-data`: Updates service documentation + Release v1.44.41 (2022-06-23) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 4a1a0f9ef8..9deff77197 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -16792,43 +16792,6 @@ var awsPartition = partition{ }, }, }, - "redshift-serverless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "rekognition": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 17e3362c19..a8a2ed5721 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.41" +const SDKVersion = "1.44.42" diff --git a/models/apis/glue/2017-03-31/api-2.json b/models/apis/glue/2017-03-31/api-2.json index e2fbc0cbaa..81bba79329 100644 --- a/models/apis/glue/2017-03-31/api-2.json +++ b/models/apis/glue/2017-03-31/api-2.json @@ -1953,6 +1953,20 @@ {"shape":"OperationTimeoutException"} ] }, + "ListCrawls":{ + "name":"ListCrawls", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCrawlsRequest"}, + "output":{"shape":"ListCrawlsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"InvalidInputException"} + ] + }, "ListCustomEntityTypes":{ "name":"ListCustomEntityTypes", "http":{ @@ -4191,6 +4205,7 @@ "LogStream":{"shape":"LogStream"} } }, + "CrawlId":{"type":"string"}, "CrawlList":{ "type":"list", "member":{"shape":"Crawl"} @@ -4231,6 +4246,34 @@ } }, "CrawlerConfiguration":{"type":"string"}, + "CrawlerHistory":{ + "type":"structure", + "members":{ + "CrawlId":{"shape":"CrawlId"}, + "State":{"shape":"CrawlerHistoryState"}, + "StartTime":{"shape":"Timestamp"}, + "EndTime":{"shape":"Timestamp"}, + "Summary":{"shape":"NameString"}, + "ErrorMessage":{"shape":"DescriptionString"}, + "LogGroup":{"shape":"LogGroup"}, + "LogStream":{"shape":"LogStream"}, + "MessagePrefix":{"shape":"MessagePrefix"}, + "DPUHour":{"shape":"NonNegativeDouble"} + } + }, + "CrawlerHistoryList":{ + "type":"list", + "member":{"shape":"CrawlerHistory"} + }, + "CrawlerHistoryState":{ + "type":"string", + "enum":[ + "RUNNING", + "COMPLETED", + "FAILED", + "STOPPED" + ] + }, "CrawlerLineageSettings":{ "type":"string", "enum":[ @@ -4316,6 +4359,18 @@ "DeltaTargets":{"shape":"DeltaTargetList"} } }, + "CrawlsFilter":{ + "type":"structure", + "members":{ + "FieldName":{"shape":"FieldName"}, + "FilterOperator":{"shape":"FilterOperator"}, + 
"FieldValue":{"shape":"GenericString"} + } + }, + "CrawlsFilterList":{ + "type":"list", + "member":{"shape":"CrawlsFilter"} + }, "CreateBlueprintRequest":{ "type":"structure", "required":[ @@ -5720,6 +5775,16 @@ "type":"string", "pattern":"[\\s\\S]*" }, + "FieldName":{ + "type":"string", + "enum":[ + "CRAWL_ID", + "STATE", + "START_TIME", + "END_TIME", + "DPU_HOUR" + ] + }, "FieldType":{"type":"string"}, "FillMissingValues":{ "type":"structure", @@ -5785,6 +5850,17 @@ "ISNULL" ] }, + "FilterOperator":{ + "type":"string", + "enum":[ + "GT", + "GE", + "LT", + "LE", + "EQ", + "NE" + ] + }, "FilterString":{ "type":"string", "max":2048, @@ -7689,6 +7765,23 @@ "NextToken":{"shape":"Token"} } }, + "ListCrawlsRequest":{ + "type":"structure", + "required":["CrawlerName"], + "members":{ + "CrawlerName":{"shape":"NameString"}, + "MaxResults":{"shape":"PageSize"}, + "Filters":{"shape":"CrawlsFilterList"}, + "NextToken":{"shape":"Token"} + } + }, + "ListCrawlsResponse":{ + "type":"structure", + "members":{ + "Crawls":{"shape":"CrawlerHistoryList"}, + "NextToken":{"shape":"Token"} + } + }, "ListCustomEntityTypesRequest":{ "type":"structure", "members":{ diff --git a/models/apis/glue/2017-03-31/docs-2.json b/models/apis/glue/2017-03-31/docs-2.json index 5926a2bd07..2051ffd6ad 100644 --- a/models/apis/glue/2017-03-31/docs-2.json +++ b/models/apis/glue/2017-03-31/docs-2.json @@ -128,6 +128,7 @@ "ImportCatalogToGlue": "

Imports an existing Amazon Athena Data Catalog to Glue.

", "ListBlueprints": "

Lists all the blueprint names in an account.

", "ListCrawlers": "

Retrieves the names of all crawler resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

", + "ListCrawls": "

Returns all the crawls of a specified crawler. Only crawls that have occurred since the launch date of the crawler history feature are returned, and only up to 12 months of crawls are retained. Older crawls are not returned.

You may use this API to retrieve all the crawls of a specified crawler, to retrieve a limited count of crawls, to retrieve crawls in a specific time range, or to retrieve crawls with a particular state, crawl ID, or DPU hour value.

", "ListCustomEntityTypes": "

Lists all the custom patterns that have been created.

", "ListDevEndpoints": "

Retrieves the names of all DevEndpoint resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

", "ListJobs": "

Retrieves the names of all job resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.

This operation takes the optional Tags field, which you can use as a filter on the response so that tagged resources can be retrieved as a group. If you choose to use tags filtering, only resources with the tag are retrieved.

", @@ -1331,6 +1332,12 @@ "CrawlList$member": null } }, + "CrawlId": { + "base": null, + "refs": { + "CrawlerHistory$CrawlId": "

A UUID that identifies the crawl.

" + } + }, "CrawlList": { "base": null, "refs": { @@ -1359,6 +1366,24 @@ "UpdateCrawlerRequest$Configuration": "

Crawler configuration information. This versioned JSON string allows users to specify aspects of a crawler's behavior. For more information, see Configuring a Crawler.

" } }, + "CrawlerHistory": { + "base": "

Contains the information for a run of a crawler.

", + "refs": { + "CrawlerHistoryList$member": null + } + }, + "CrawlerHistoryList": { + "base": null, + "refs": { + "ListCrawlsResponse$Crawls": "

A list of CrawlerHistory objects representing the crawl runs that meet your criteria.

" + } + }, + "CrawlerHistoryState": { + "base": null, + "refs": { + "CrawlerHistory$State": "

The state of the crawl.

" + } + }, "CrawlerLineageSettings": { "base": null, "refs": { @@ -1436,6 +1461,18 @@ "UpdateCrawlerRequest$Targets": "

A list of targets to crawl.

" } }, + "CrawlsFilter": { + "base": "

A field, comparator, and value that you can use to filter the crawler runs for a specified crawler.

", + "refs": { + "CrawlsFilterList$member": null + } + }, + "CrawlsFilterList": { + "base": null, + "refs": { + "ListCrawlsRequest$Filters": "

Filters the crawls by the criteria you specify in a list of CrawlsFilter objects.

" + } + }, "CreateBlueprintRequest": { "base": null, "refs": { @@ -2130,6 +2167,7 @@ "ConnectionInput$Description": "

The description of the connection.

", "Crawl$ErrorMessage": "

The error message associated with the crawl.

", "Crawler$Description": "

A description of the crawler.

", + "CrawlerHistory$ErrorMessage": "

If an error occurred, the error message associated with the crawl.

", "CreateCrawlerRequest$Description": "

A description of the new crawler.

", "CreateJobRequest$Description": "

Description of the job being defined.

", "CreateMLTransformRequest$Description": "

A description of the machine learning transform that is being defined. The default is an empty string.

", @@ -2564,6 +2602,12 @@ "CustomCode$Code": "

The custom code that is used to perform the data transformation.

" } }, + "FieldName": { + "base": null, + "refs": { + "CrawlsFilter$FieldName": "

A key used to filter the crawler runs for a specified crawler. Valid field names are CRAWL_ID (a string UUID for the crawl), STATE (a string for the crawl state), START_TIME and END_TIME (epoch timestamps in milliseconds), and DPU_HOUR (the number of data processing unit (DPU) hours used for the crawl).

" + } + }, "FieldType": { "base": null, "refs": { @@ -2607,6 +2651,12 @@ "FilterExpression$Operation": "

The type of operation to perform in the expression.

" } }, + "FilterOperator": { + "base": null, + "refs": { + "CrawlsFilter$FilterOperator": "

A defined comparator that operates on the value. The available operators are GT (greater than), GE (greater than or equal to), LT (less than), LE (less than or equal to), EQ (equal to), and NE (not equal to).

" + } + }, "FilterString": { "base": null, "refs": { @@ -2706,6 +2756,7 @@ "AdditionalPlanOptionsMap$value": null, "Blueprint$BlueprintLocation": "

Specifies the path in Amazon S3 where the blueprint is published.

", "Blueprint$BlueprintServiceLocation": "

Specifies a path in Amazon S3 where the blueprint is copied when you call CreateBlueprint/UpdateBlueprint to register the blueprint in Glue.

", + "CrawlsFilter$FieldValue": "

The value provided for comparison on the crawl field.

", "CreateDevEndpointRequest$EndpointName": "

The name to be assigned to the new DevEndpoint.

", "CreateDevEndpointRequest$SubnetId": "

The subnet ID for the new DevEndpoint to use.

", "CreateDevEndpointRequest$PublicKey": "

The public key to be used by this DevEndpoint for authentication. This attribute is provided for backward compatibility because the recommended attribute to use is public keys.

", @@ -3992,11 +4043,11 @@ } }, "LakeFormationConfiguration": { - "base": "

Specifies AWS Lake Formation configuration settings for the crawler.

", + "base": "

Specifies Lake Formation configuration settings for the crawler.

", "refs": { - "Crawler$LakeFormationConfiguration": "

Specifies whether the crawler should use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.

", - "CreateCrawlerRequest$LakeFormationConfiguration": null, - "UpdateCrawlerRequest$LakeFormationConfiguration": null + "Crawler$LakeFormationConfiguration": "

Specifies whether the crawler should use Lake Formation credentials for the crawler instead of the IAM role credentials.

", + "CreateCrawlerRequest$LakeFormationConfiguration": "

Specifies Lake Formation configuration settings for the crawler.

", + "UpdateCrawlerRequest$LakeFormationConfiguration": "

Specifies Lake Formation configuration settings for the crawler.

" } }, "Language": { @@ -4072,6 +4123,16 @@ "refs": { } }, + "ListCrawlsRequest": { + "base": null, + "refs": { + } + }, + "ListCrawlsResponse": { + "base": null, + "refs": { + } + }, "ListCustomEntityTypesRequest": { "base": null, "refs": { @@ -4212,6 +4273,7 @@ "base": null, "refs": { "Crawl$LogGroup": "

The log group associated with the crawl.

", + "CrawlerHistory$LogGroup": "

The log group associated with the crawl.

", "LastCrawlInfo$LogGroup": "

The log group for the last crawl.

" } }, @@ -4219,6 +4281,7 @@ "base": null, "refs": { "Crawl$LogStream": "

The log stream associated with the crawl.

", + "CrawlerHistory$LogStream": "

The log stream associated with the crawl.

", "LastCrawlInfo$LogStream": "

The log stream for the last crawl.

" } }, @@ -4364,6 +4427,7 @@ "MessagePrefix": { "base": null, "refs": { + "CrawlerHistory$MessagePrefix": "

The prefix for a CloudWatch message about this crawl.

", "LastCrawlInfo$MessagePrefix": "

The prefix for a message about this crawl.

" } }, @@ -4532,6 +4596,7 @@ "ConnectionPasswordEncryption$AwsKmsKeyId": "

An KMS key that is used to encrypt the connection password.

If connection password protection is enabled, the caller of CreateConnection and UpdateConnection needs at least kms:Encrypt permission on the specified KMS key, to encrypt passwords before storing them in the Data Catalog.

You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.

", "ContextWords$member": null, "Crawler$Name": "

The name of the crawler.

", + "CrawlerHistory$Summary": "

A run summary for the specific crawl in JSON. Contains the catalog tables and partitions that were added, updated, or deleted.

", "CrawlerMetrics$CrawlerName": "

The name of the crawler.

", "CrawlerNameList$member": null, "CreateBlueprintResponse$Name": "

Returns the name of the blueprint that was registered.

", @@ -4680,6 +4745,7 @@ "JsonClassifier$Name": "

The name of the classifier.

", "KeyList$member": null, "KeySchemaElement$Name": "

The name of a partition key.

", + "ListCrawlsRequest$CrawlerName": "

The name of the crawler whose runs you want to retrieve.

", "ListStatementsRequest$SessionId": "

The Session ID of the statements.

", "ListTriggersRequest$DependentJobName": "

The name of the job for which to retrieve triggers. The trigger that can start this job is returned. If there is no such trigger, all triggers are returned.

", "MLTransform$Name": "

A user-defined name for the machine learning transform. Names are not guaranteed unique and can be changed at any time.

", @@ -4886,6 +4952,7 @@ "base": null, "refs": { "BinaryColumnStatisticsData$AverageLength": "

The average bit sequence length in the column.

", + "CrawlerHistory$DPUHour": "

The number of data processing unit (DPU) hours used for the crawl.

", "CrawlerMetrics$TimeLeftSeconds": "

The estimated time left to complete a running crawl.

", "CrawlerMetrics$LastRuntimeSeconds": "

The duration of the crawler's most recent run, in seconds.

", "CrawlerMetrics$MedianRuntimeSeconds": "

The median duration of this crawler's runs, in seconds.

", @@ -4984,7 +5051,7 @@ "GetWorkflowRequest$IncludeGraph": "

Specifies whether to include a graph when returning the workflow resource metadata.

", "GetWorkflowRunRequest$IncludeGraph": "

Specifies whether to include the workflow graph in response or not.

", "GetWorkflowRunsRequest$IncludeGraph": "

Specifies whether to include the workflow graph in response or not.

", - "LakeFormationConfiguration$UseLakeFormationCredentials": "

Specifies whether to use AWS Lake Formation credentials for the crawler instead of the IAM role credentials.

", + "LakeFormationConfiguration$UseLakeFormationCredentials": "

Specifies whether to use Lake Formation credentials for the crawler instead of the IAM role credentials.

", "MongoDBTarget$ScanAll": "

Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.

A value of true means to scan all records, while a value of false means to sample the records. If no value is specified, the value defaults to true.

", "UpdateCsvClassifierRequest$DisableValueTrimming": "

Specifies not to trim values before identifying the type of column values. The default value is true.

", "UpdateCsvClassifierRequest$AllowSingleColumn": "

Enables the processing of files that contain only one column.

" @@ -5214,6 +5281,7 @@ "GetWorkflowRunsRequest$MaxResults": "

The maximum number of workflow runs to be included in the response.

", "ListBlueprintsRequest$MaxResults": "

The maximum size of a list to return.

", "ListCrawlersRequest$MaxResults": "

The maximum size of a list to return.

", + "ListCrawlsRequest$MaxResults": "

The maximum number of results to return. The default is 20, and the maximum is 100.

", "ListCustomEntityTypesRequest$MaxResults": "

The maximum number of results to return.

", "ListDevEndpointsRequest$MaxResults": "

The maximum size of a list to return.

", "ListJobsRequest$MaxResults": "

The maximum size of a list to return.

", @@ -6765,6 +6833,8 @@ "Connection$LastUpdatedTime": "

The last time that this connection definition was updated.

", "Crawler$CreationTime": "

The time that the crawler was created.

", "Crawler$LastUpdated": "

The time that the crawler was last updated.

", + "CrawlerHistory$StartTime": "

The date and time on which the crawl started.

", + "CrawlerHistory$EndTime": "

The date and time on which the crawl ended.

", "CsvClassifier$CreationTime": "

The time that this classifier was registered.

", "CsvClassifier$LastUpdated": "

The time that this classifier was last updated.

", "Database$CreateTime": "

The time at which the metadata database was created in the catalog.

", @@ -6870,6 +6940,8 @@ "GetUserDefinedFunctionsResponse$NextToken": "

A continuation token, if the list of functions returned does not include the last requested function.

", "ListCrawlersRequest$NextToken": "

A continuation token, if this is a continuation request.

", "ListCrawlersResponse$NextToken": "

A continuation token, if the returned list does not contain the last metric available.

", + "ListCrawlsRequest$NextToken": "

A continuation token, if this is a continuation call.

", + "ListCrawlsResponse$NextToken": "

A continuation token for paginating the returned list of crawls, returned if the current segment of the list is not the last.

", "SearchTablesRequest$NextToken": "

A continuation token, included if this is a continuation call.

", "SearchTablesResponse$NextToken": "

A continuation token, present if the current list segment is not the last.

" } diff --git a/models/apis/rds-data/2018-08-01/docs-2.json b/models/apis/rds-data/2018-08-01/docs-2.json index bd6146ba1f..4d34c07df2 100644 --- a/models/apis/rds-data/2018-08-01/docs-2.json +++ b/models/apis/rds-data/2018-08-01/docs-2.json @@ -2,11 +2,11 @@ "version": "2.0", "service": "

Amazon RDS Data Service

Amazon RDS provides an HTTP endpoint to run SQL statements on an Amazon Aurora Serverless DB cluster. To run these statements, you work with the Data Service API.

For more information about the Data Service API, see Using the Data API in the Amazon Aurora User Guide.

", "operations": { - "BatchExecuteStatement": "

Runs a batch SQL statement over an array of data.

You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

", + "BatchExecuteStatement": "

Runs a batch SQL statement over an array of data.

You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

There isn't a fixed upper limit on the number of parameter sets. However, the maximum size of the HTTP request submitted through the Data API is 4 MiB. If the request exceeds this limit, the Data API returns an error and doesn't process the request. This 4-MiB limit includes the size of the HTTP headers and the JSON notation in the request. Thus, the number of parameter sets that you can include depends on a combination of factors, such as the size of the SQL statement and the size of each parameter set.

The response size limit is 1 MiB. If the call returns more than 1 MiB of response data, the call is terminated.

", "BeginTransaction": "

Starts a SQL transaction.

 <important> <p>A transaction can run for a maximum of 24 hours. A transaction is terminated and rolled back automatically after 24 hours.</p> <p>A transaction times out if no calls use its transaction ID in three minutes. If a transaction times out before it's committed, it's rolled back automatically.</p> <p>DDL statements inside a transaction cause an implicit commit. We recommend that you run each DDL statement in a separate <code>ExecuteStatement</code> call with <code>continueAfterTimeout</code> enabled.</p> </important> 
", "CommitTransaction": "

Ends a SQL transaction started with the BeginTransaction operation and commits the changes.

", "ExecuteSql": "

Runs one or more SQL statements.

This operation is deprecated. Use the BatchExecuteStatement or ExecuteStatement operation.

", - "ExecuteStatement": "

Runs a SQL statement against a database.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

If the binary response data from the database is more than 1 MB, the call is terminated.

", + "ExecuteStatement": "

Runs a SQL statement against a database.

If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.

If the binary response data from the database is more than 1 MB, the call is terminated.

", "RollbackTransaction": "

Performs a rollback of a transaction. Rolling back a transaction cancels its changes.

" }, "shapes": { @@ -19,15 +19,15 @@ "base": null, "refs": { "BatchExecuteStatementRequest$resourceArn": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

", - "BatchExecuteStatementRequest$secretArn": "

The name or ARN of the secret that enables access to the DB cluster.

", + "BatchExecuteStatementRequest$secretArn": "

The ARN of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret.

For information about creating the secret, see Create a database secret.

", "BeginTransactionRequest$resourceArn": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

", "BeginTransactionRequest$secretArn": "

The name or ARN of the secret that enables access to the DB cluster.

", "CommitTransactionRequest$resourceArn": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

", "CommitTransactionRequest$secretArn": "

The name or ARN of the secret that enables access to the DB cluster.

", - "ExecuteSqlRequest$awsSecretStoreArn": "

The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster.

", + "ExecuteSqlRequest$awsSecretStoreArn": "

The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret.

For information about creating the secret, see Create a database secret.

", "ExecuteSqlRequest$dbClusterOrInstanceArn": "

The ARN of the Aurora Serverless DB cluster.

", "ExecuteStatementRequest$resourceArn": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

", - "ExecuteStatementRequest$secretArn": "

The name or ARN of the secret that enables access to the DB cluster.

", + "ExecuteStatementRequest$secretArn": "

The ARN of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret.

For information about creating the secret, see Create a database secret.

", "RollbackTransactionRequest$resourceArn": "

The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.

", "RollbackTransactionRequest$secretArn": "

The name or ARN of the secret that enables access to the DB cluster.

" } @@ -390,7 +390,7 @@ "SqlStatement": { "base": null, "refs": { - "BatchExecuteStatementRequest$sql": "

The SQL statement to run.

", + "BatchExecuteStatementRequest$sql": "

The SQL statement to run. Don't include a semicolon (;) at the end of the SQL statement.

", "ExecuteSqlRequest$sqlStatements": "

One or more SQL statements to run on the DB cluster.

You can separate SQL statements from each other with a semicolon (;). Any valid SQL statement is permitted, including data definition, data manipulation, and commit statements.

", "ExecuteStatementRequest$sql": "

The SQL statement to run.

" } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 5c173af813..704a134e19 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -9815,21 +9815,6 @@ } } }, - "redshift-serverless" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-northeast-2" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "eu-central-1" : { }, - "eu-north-1" : { }, - "eu-west-1" : { }, - "eu-west-2" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-2" : { } - } - }, "rekognition" : { "endpoints" : { "ap-northeast-1" : { }, diff --git a/service/glue/api.go b/service/glue/api.go index 188a25e6b3..79b5f15c40 100644 --- a/service/glue/api.go +++ b/service/glue/api.go @@ -12782,6 +12782,104 @@ func (c *Glue) ListCrawlersPagesWithContext(ctx aws.Context, input *ListCrawlers return p.Err() } +const opListCrawls = "ListCrawls" + +// ListCrawlsRequest generates a "aws/request.Request" representing the +// client's request for the ListCrawls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListCrawls for more information on using the ListCrawls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListCrawlsRequest method. +// req, resp := client.ListCrawlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawls +func (c *Glue) ListCrawlsRequest(input *ListCrawlsInput) (req *request.Request, output *ListCrawlsOutput) { + op := &request.Operation{ + Name: opListCrawls, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListCrawlsInput{} + } + + output = &ListCrawlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCrawls API operation for AWS Glue. +// +// Returns all the crawls of a specified crawler. Returns only the crawls that +// have occurred since the launch date of the crawler history feature, and only +// retains up to 12 months of crawls. Older crawls will not be returned. +// +// You may use this API to: +// +// * Retrive all the crawls of a specified crawler. +// +// * Retrieve all the crawls of a specified crawler within a limited count. +// +// * Retrieve all the crawls of a specified crawler in a specific time range. +// +// * Retrieve all the crawls of a specified crawler with a particular state, +// crawl ID, or DPU hour value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation ListCrawls for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * OperationTimeoutException +// The operation timed out. +// +// * InvalidInputException +// The input provided was not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ListCrawls +func (c *Glue) ListCrawls(input *ListCrawlsInput) (*ListCrawlsOutput, error) { + req, out := c.ListCrawlsRequest(input) + return out, req.Send() +} + +// ListCrawlsWithContext is the same as ListCrawls with the addition of +// the ability to pass a context and additional request options. +// +// See ListCrawls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ListCrawlsWithContext(ctx aws.Context, input *ListCrawlsInput, opts ...request.Option) (*ListCrawlsOutput, error) { + req, out := c.ListCrawlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListCustomEntityTypes = "ListCustomEntityTypes" // ListCustomEntityTypesRequest generates a "aws/request.Request" representing the @@ -25291,8 +25389,8 @@ type Crawler struct { // A description of the crawler. Description *string `type:"string"` - // Specifies whether the crawler should use AWS Lake Formation credentials for - // the crawler instead of the IAM role credentials. + // Specifies whether the crawler should use Lake Formation credentials for the + // crawler instead of the IAM role credentials. LakeFormationConfiguration *LakeFormationConfiguration `type:"structure"` // The status of the last crawl, and potentially error information if an error @@ -25473,6 +25571,120 @@ func (s *Crawler) SetVersion(v int64) *Crawler { return s } +// Contains the information for a run of a crawler. +type CrawlerHistory struct { + _ struct{} `type:"structure"` + + // A UUID identifier for each crawl. + CrawlId *string `type:"string"` + + // The number of data processing units (DPU) used in hours for the crawl. + DPUHour *float64 `type:"double"` + + // The date and time on which the crawl ended. + EndTime *time.Time `type:"timestamp"` + + // If an error occurred, the error message associated with the crawl. + ErrorMessage *string `type:"string"` + + // The log group associated with the crawl. + LogGroup *string `min:"1" type:"string"` + + // The log stream associated with the crawl. + LogStream *string `min:"1" type:"string"` + + // The prefix for a CloudWatch message about this crawl. + MessagePrefix *string `min:"1" type:"string"` + + // The date and time on which the crawl started. + StartTime *time.Time `type:"timestamp"` + + // The state of the crawl. + State *string `type:"string" enum:"CrawlerHistoryState"` + + // A run summary for the specific crawl in JSON. Contains the catalog tables + // and partitions that were added, updated, or deleted. + Summary *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CrawlerHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CrawlerHistory) GoString() string { + return s.String() +} + +// SetCrawlId sets the CrawlId field's value. +func (s *CrawlerHistory) SetCrawlId(v string) *CrawlerHistory { + s.CrawlId = &v + return s +} + +// SetDPUHour sets the DPUHour field's value. +func (s *CrawlerHistory) SetDPUHour(v float64) *CrawlerHistory { + s.DPUHour = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *CrawlerHistory) SetEndTime(v time.Time) *CrawlerHistory { + s.EndTime = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *CrawlerHistory) SetErrorMessage(v string) *CrawlerHistory { + s.ErrorMessage = &v + return s +} + +// SetLogGroup sets the LogGroup field's value. +func (s *CrawlerHistory) SetLogGroup(v string) *CrawlerHistory { + s.LogGroup = &v + return s +} + +// SetLogStream sets the LogStream field's value. +func (s *CrawlerHistory) SetLogStream(v string) *CrawlerHistory { + s.LogStream = &v + return s +} + +// SetMessagePrefix sets the MessagePrefix field's value. +func (s *CrawlerHistory) SetMessagePrefix(v string) *CrawlerHistory { + s.MessagePrefix = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *CrawlerHistory) SetStartTime(v time.Time) *CrawlerHistory { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *CrawlerHistory) SetState(v string) *CrawlerHistory { + s.State = &v + return s +} + +// SetSummary sets the Summary field's value. +func (s *CrawlerHistory) SetSummary(v string) *CrawlerHistory { + s.Summary = &v + return s +} + // Metrics for a specified crawler. type CrawlerMetrics struct { _ struct{} `type:"structure"` @@ -25893,6 +26105,80 @@ func (s *CrawlerTargets) SetS3Targets(v []*S3Target) *CrawlerTargets { return s } +// A list of fields, comparators and value that you can use to filter the crawler +// runs for a specified crawler. +type CrawlsFilter struct { + _ struct{} `type:"structure"` + + // A key used to filter the crawler runs for a specified crawler. Valid values + // for each of the field names are: + // + // * CRAWL_ID: A string representing the UUID identifier for a crawl. + // + // * STATE: A string representing the state of the crawl. + // + // * START_TIME and END_TIME: The epoch timestamp in milliseconds. + // + // * DPU_HOUR: The number of data processing unit (DPU) hours used for the + // crawl. + FieldName *string `type:"string" enum:"FieldName"` + + // The value provided for comparison on the crawl field. + FieldValue *string `type:"string"` + + // A defined comparator that operates on the value. The available operators + // are: + // + // * GT: Greater than. + // + // * GE: Greater than or equal to. + // + // * LT: Less than. + // + // * LE: Less than or equal to. + // + // * EQ: Equal to. + // + // * NE: Not equal to. + FilterOperator *string `type:"string" enum:"FilterOperator"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CrawlsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CrawlsFilter) GoString() string { + return s.String() +} + +// SetFieldName sets the FieldName field's value. +func (s *CrawlsFilter) SetFieldName(v string) *CrawlsFilter { + s.FieldName = &v + return s +} + +// SetFieldValue sets the FieldValue field's value. +func (s *CrawlsFilter) SetFieldValue(v string) *CrawlsFilter { + s.FieldValue = &v + return s +} + +// SetFilterOperator sets the FilterOperator field's value. +func (s *CrawlsFilter) SetFilterOperator(v string) *CrawlsFilter { + s.FilterOperator = &v + return s +} + type CreateBlueprintInput struct { _ struct{} `type:"structure"` @@ -26238,7 +26524,7 @@ type CreateCrawlerInput struct { // A description of the new crawler. Description *string `type:"string"` - // Specifies AWS Lake Formation configuration settings for the crawler. + // Specifies Lake Formation configuration settings for the crawler. LakeFormationConfiguration *LakeFormationConfiguration `type:"structure"` // Specifies data lineage configuration settings for the crawler. @@ -45865,7 +46151,7 @@ func (s *LabelingSetGenerationTaskRunProperties) SetOutputS3Path(v string) *Labe return s } -// Specifies AWS Lake Formation configuration settings for the crawler. +// Specifies Lake Formation configuration settings for the crawler. type LakeFormationConfiguration struct { _ struct{} `type:"structure"` @@ -45873,7 +46159,7 @@ type LakeFormationConfiguration struct { // data, this can be left as null. AccountId *string `type:"string"` - // Specifies whether to use AWS Lake Formation credentials for the crawler instead + // Specifies whether to use Lake Formation credentials for the crawler instead // of the IAM role credentials. UseLakeFormationCredentials *bool `type:"boolean"` } @@ -46299,6 +46585,129 @@ func (s *ListCrawlersOutput) SetNextToken(v string) *ListCrawlersOutput { return s } +type ListCrawlsInput struct { + _ struct{} `type:"structure"` + + // The name of the crawler whose runs you want to retrieve. + // + // CrawlerName is a required field + CrawlerName *string `min:"1" type:"string" required:"true"` + + // Filters the crawls by the criteria you specify in a list of CrawlsFilter + // objects. + Filters []*CrawlsFilter `type:"list"` + + // The maximum number of results to return. The default is 20, and maximum is + // 100. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation call. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCrawlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCrawlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListCrawlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCrawlsInput"} + if s.CrawlerName == nil { + invalidParams.Add(request.NewErrParamRequired("CrawlerName")) + } + if s.CrawlerName != nil && len(*s.CrawlerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrawlerName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCrawlerName sets the CrawlerName field's value. +func (s *ListCrawlsInput) SetCrawlerName(v string) *ListCrawlsInput { + s.CrawlerName = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListCrawlsInput) SetFilters(v []*CrawlsFilter) *ListCrawlsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCrawlsInput) SetMaxResults(v int64) *ListCrawlsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCrawlsInput) SetNextToken(v string) *ListCrawlsInput { + s.NextToken = &v + return s +} + +type ListCrawlsOutput struct { + _ struct{} `type:"structure"` + + // A list of CrawlerHistory objects representing the crawl runs that meet your + // criteria. + Crawls []*CrawlerHistory `type:"list"` + + // A continuation token for paginating the returned list of crawls, returned + // if the current segment of the list is not the last. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCrawlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCrawlsOutput) GoString() string { + return s.String() +} + +// SetCrawls sets the Crawls field's value. +func (s *ListCrawlsOutput) SetCrawls(v []*CrawlerHistory) *ListCrawlsOutput { + s.Crawls = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCrawlsOutput) SetNextToken(v string) *ListCrawlsOutput { + s.NextToken = &v + return s +} + type ListCustomEntityTypesInput struct { _ struct{} `type:"structure"` @@ -60541,7 +60950,7 @@ type UpdateCrawlerInput struct { // A description of the new crawler. Description *string `type:"string"` - // Specifies AWS Lake Formation configuration settings for the crawler. + // Specifies Lake Formation configuration settings for the crawler. LakeFormationConfiguration *LakeFormationConfiguration `type:"structure"` // Specifies data lineage configuration settings for the crawler.
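A minimal usage sketch for the new ListCrawls operation follows; it is not part of the generated SDK source above. It assumes a default session with region and credentials configured, and the crawler name "my-crawler" and the FAILED-state filter are illustrative only. Since this release ships no ListCrawlsPages helper, the sketch follows NextToken manually:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	input := &glue.ListCrawlsInput{
		CrawlerName: aws.String("my-crawler"), // hypothetical crawler name
		MaxResults:  aws.Int64(50),            // default 20, maximum 100
		Filters: []*glue.CrawlsFilter{{
			// Keep only failed crawls. FieldValue is always a string,
			// even for the timestamp and DPU-hour fields.
			FieldName:      aws.String(glue.FieldNameState),
			FilterOperator: aws.String(glue.FilterOperatorEq),
			FieldValue:     aws.String(glue.CrawlerHistoryStateFailed),
		}},
	}

	for {
		out, err := svc.ListCrawls(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, crawl := range out.Crawls {
			fmt.Printf("%s %s DPU-hours=%g\n",
				aws.StringValue(crawl.CrawlId),
				aws.StringValue(crawl.State),
				aws.Float64Value(crawl.DPUHour))
		}
		if out.NextToken == nil {
			break // last segment of the list
		}
		input.NextToken = out.NextToken
	}
}

Because FieldValue is a generic string, a START_TIME or END_TIME filter would be expressed as an epoch timestamp in milliseconds passed in string form, per the FieldName documentation above.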
@@ -63841,6 +64250,30 @@ func CrawlState_Values() []string { } } +const ( + // CrawlerHistoryStateRunning is a CrawlerHistoryState enum value + CrawlerHistoryStateRunning = "RUNNING" + + // CrawlerHistoryStateCompleted is a CrawlerHistoryState enum value + CrawlerHistoryStateCompleted = "COMPLETED" + + // CrawlerHistoryStateFailed is a CrawlerHistoryState enum value + CrawlerHistoryStateFailed = "FAILED" + + // CrawlerHistoryStateStopped is a CrawlerHistoryState enum value + CrawlerHistoryStateStopped = "STOPPED" +) + +// CrawlerHistoryState_Values returns all elements of the CrawlerHistoryState enum +func CrawlerHistoryState_Values() []string { + return []string{ + CrawlerHistoryStateRunning, + CrawlerHistoryStateCompleted, + CrawlerHistoryStateFailed, + CrawlerHistoryStateStopped, + } +} + const ( // CrawlerLineageSettingsEnable is a CrawlerLineageSettings enum value CrawlerLineageSettingsEnable = "ENABLE" @@ -63973,6 +64406,34 @@ func ExistCondition_Values() []string { } } +const ( + // FieldNameCrawlId is a FieldName enum value + FieldNameCrawlId = "CRAWL_ID" + + // FieldNameState is a FieldName enum value + FieldNameState = "STATE" + + // FieldNameStartTime is a FieldName enum value + FieldNameStartTime = "START_TIME" + + // FieldNameEndTime is a FieldName enum value + FieldNameEndTime = "END_TIME" + + // FieldNameDpuHour is a FieldName enum value + FieldNameDpuHour = "DPU_HOUR" +) + +// FieldName_Values returns all elements of the FieldName enum +func FieldName_Values() []string { + return []string{ + FieldNameCrawlId, + FieldNameState, + FieldNameStartTime, + FieldNameEndTime, + FieldNameDpuHour, + } +} + const ( // FilterLogicalOperatorAnd is a FilterLogicalOperator enum value FilterLogicalOperatorAnd = "AND" @@ -64025,6 +64486,38 @@ func FilterOperation_Values() []string { } } +const ( + // FilterOperatorGt is a FilterOperator enum value + FilterOperatorGt = "GT" + + // FilterOperatorGe is a FilterOperator enum value + FilterOperatorGe = "GE" + + // FilterOperatorLt is a FilterOperator enum value + FilterOperatorLt = "LT" + + // FilterOperatorLe is a FilterOperator enum value + FilterOperatorLe = "LE" + + // FilterOperatorEq is a FilterOperator enum value + FilterOperatorEq = "EQ" + + // FilterOperatorNe is a FilterOperator enum value + FilterOperatorNe = "NE" +) + +// FilterOperator_Values returns all elements of the FilterOperator enum +func FilterOperator_Values() []string { + return []string{ + FilterOperatorGt, + FilterOperatorGe, + FilterOperatorLt, + FilterOperatorLe, + FilterOperatorEq, + FilterOperatorNe, + } +} + const ( // FilterValueTypeColumnextracted is a FilterValueType enum value FilterValueTypeColumnextracted = "COLUMNEXTRACTED" diff --git a/service/glue/glueiface/interface.go b/service/glue/glueiface/interface.go index 41298e6120..3ad184e9d8 100644 --- a/service/glue/glueiface/interface.go +++ b/service/glue/glueiface/interface.go @@ -633,6 +633,10 @@ type GlueAPI interface { ListCrawlersPages(*glue.ListCrawlersInput, func(*glue.ListCrawlersOutput, bool) bool) error ListCrawlersPagesWithContext(aws.Context, *glue.ListCrawlersInput, func(*glue.ListCrawlersOutput, bool) bool, ...request.Option) error + ListCrawls(*glue.ListCrawlsInput) (*glue.ListCrawlsOutput, error) + ListCrawlsWithContext(aws.Context, *glue.ListCrawlsInput, ...request.Option) (*glue.ListCrawlsOutput, error) + ListCrawlsRequest(*glue.ListCrawlsInput) (*request.Request, *glue.ListCrawlsOutput) + ListCustomEntityTypes(*glue.ListCustomEntityTypesInput) (*glue.ListCustomEntityTypesOutput, error) 
ListCustomEntityTypesWithContext(aws.Context, *glue.ListCustomEntityTypesInput, ...request.Option) (*glue.ListCustomEntityTypesOutput, error) ListCustomEntityTypesRequest(*glue.ListCustomEntityTypesInput) (*request.Request, *glue.ListCustomEntityTypesOutput) diff --git a/service/rdsdataservice/api.go b/service/rdsdataservice/api.go index c7236680e1..1cf3fb3a89 100644 --- a/service/rdsdataservice/api.go +++ b/service/rdsdataservice/api.go @@ -64,6 +64,17 @@ func (c *RDSDataService) BatchExecuteStatementRequest(input *BatchExecuteStateme // If a call isn't part of a transaction because it doesn't include the transactionID // parameter, changes that result from the call are committed automatically. // +// There isn't a fixed upper limit on the number of parameter sets. However, +// the maximum size of the HTTP request submitted through the Data API is 4 +// MiB. If the request exceeds this limit, the Data API returns an error and +// doesn't process the request. This 4-MiB limit includes the size of the HTTP +// headers and the JSON notation in the request. Thus, the number of parameter +// sets that you can include depends on a combination of factors, such as the +// size of the SQL statement and the size of each parameter set. +// +// The response size limit is 1 MiB. If the call returns more than 1 MiB of +// response data, the call is terminated. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -840,12 +851,16 @@ type BatchExecuteStatementInput struct { // The name of the database schema. Schema *string `locationName:"schema" type:"string"` - // The name or ARN of the secret that enables access to the DB cluster. + // The ARN of the secret that enables access to the DB cluster. Enter the database + // user name and password for the credentials in the secret. + // + // For information about creating the secret, see Create a database secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_database_secret.html). // // SecretArn is a required field SecretArn *string `locationName:"secretArn" min:"11" type:"string" required:"true"` - // The SQL statement to run. + // The SQL statement to run. Don't include a semicolon (;) at the end of the + // SQL statement. // // Sql is a required field Sql *string `locationName:"sql" type:"string" required:"true"` @@ -1361,7 +1376,10 @@ type ExecuteSqlInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the secret that enables access to the DB - // cluster. + // cluster. Enter the database user name and password for the credentials in + // the secret. + // + // For information about creating the secret, see Create a database secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_database_secret.html). // // AwsSecretStoreArn is a required field AwsSecretStoreArn *string `locationName:"awsSecretStoreArn" min:"11" type:"string" required:"true"` @@ -1541,7 +1559,10 @@ type ExecuteStatementInput struct { // Currently, the schema parameter isn't supported. Schema *string `locationName:"schema" type:"string"` - // The name or ARN of the secret that enables access to the DB cluster. + // The ARN of the secret that enables access to the DB cluster. Enter the database + // user name and password for the credentials in the secret. 
+ // + // For information about creating the secret, see Create a database secret (https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_database_secret.html). // // SecretArn is a required field SecretArn *string `locationName:"secretArn" min:"11" type:"string" required:"true"`
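To close, a hedged sketch of the rds-data guidance documented in this release: a BatchExecuteStatement call whose SecretArn references a Secrets Manager secret storing the database user name and password, whose SQL statement carries no trailing semicolon, and whose parameter sets batch one row each. Both ARNs and the pets table are placeholders, and the whole request must stay under the Data API's 4 MiB limit:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rdsdataservice"
)

func main() {
	svc := rdsdataservice.New(session.Must(session.NewSession()))

	out, err := svc.BatchExecuteStatement(&rdsdataservice.BatchExecuteStatementInput{
		// Placeholder ARNs: substitute your Aurora Serverless cluster and
		// the secret that holds its credentials.
		ResourceArn: aws.String("arn:aws:rds:us-east-1:123456789012:cluster:example"),
		SecretArn:   aws.String("arn:aws:secretsmanager:us-east-1:123456789012:secret:example"),
		// No trailing semicolon, per the updated documentation.
		Sql: aws.String("INSERT INTO pets (name) VALUES (:name)"),
		// One parameter set per row; all sets together count toward the
		// 4 MiB request limit, headers and JSON encoding included.
		ParameterSets: [][]*rdsdataservice.SqlParameter{
			{{Name: aws.String("name"), Value: &rdsdataservice.Field{StringValue: aws.String("Rex")}}},
			{{Name: aws.String("name"), Value: &rdsdataservice.Field{StringValue: aws.String("Mittens")}}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("batches executed:", len(out.UpdateResults))
}

Splitting an oversized batch across several BatchExecuteStatement calls is one way to stay under the request ceiling, since there is no fixed limit on the number of parameter sets themselves.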