Release v1.44.41 (2022-06-23) (#4453)
Release v1.44.41 (2022-06-23)
===

### Service Client Updates
* `service/lookoutequipment`: Updates service API, documentation, and paginators
* `service/mediaconvert`: Updates service documentation
  * AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision.
* `service/mgn`: Updates service API, documentation, and paginators
* `service/migration-hub-refactor-spaces`: Updates service API and documentation
* `service/sagemaker`: Updates service API and documentation
  * SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access their private workforce in VPC mode.
aws-sdk-go-automation committed Jun 23, 2022
1 parent 517bd5e commit 957a9e8
Showing 21 changed files with 3,409 additions and 126 deletions.
12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,15 @@
Release v1.44.41 (2022-06-23)
===

### Service Client Updates
* `service/lookoutequipment`: Updates service API, documentation, and paginators
* `service/mediaconvert`: Updates service documentation
  * AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision.
* `service/mgn`: Updates service API, documentation, and paginators
* `service/migration-hub-refactor-spaces`: Updates service API and documentation
* `service/sagemaker`: Updates service API and documentation
  * SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access their private workforce in VPC mode.

Release v1.44.40 (2022-06-22)
===

2 changes: 1 addition & 1 deletion aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.40"
const SDKVersion = "1.44.41"
59 changes: 58 additions & 1 deletion models/apis/lookoutequipment/2020-12-15/api-2.json
@@ -206,6 +206,22 @@
{"shape":"InternalServerException"}
]
},
"ListInferenceEvents":{
"name":"ListInferenceEvents",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"ListInferenceEventsRequest"},
"output":{"shape":"ListInferenceEventsResponse"},
"errors":[
{"shape":"ValidationException"},
{"shape":"ThrottlingException"},
{"shape":"ResourceNotFoundException"},
{"shape":"AccessDeniedException"},
{"shape":"InternalServerException"}
]
},
"ListInferenceExecutions":{
"name":"ListInferenceExecutions",
"http":{
@@ -789,6 +805,10 @@
"TotalNumberOfDuplicateTimestamps":{"shape":"Integer"}
}
},
"EventDurationInSeconds":{
"type":"long",
"min":0
},
"FileNameTimestampFormat":{
"type":"string",
"pattern":"^EPOCH|yyyy-MM-dd-HH-mm-ss|yyyyMMddHHmmss$"
@@ -806,6 +826,21 @@
"min":1,
"pattern":"\\p{ASCII}{1,256}"
},
"InferenceEventSummaries":{
"type":"list",
"member":{"shape":"InferenceEventSummary"}
},
"InferenceEventSummary":{
"type":"structure",
"members":{
"InferenceSchedulerArn":{"shape":"InferenceSchedulerArn"},
"InferenceSchedulerName":{"shape":"InferenceSchedulerName"},
"EventStartTime":{"shape":"Timestamp"},
"EventEndTime":{"shape":"Timestamp"},
"Diagnostics":{"shape":"ModelMetrics"},
"EventDurationInSeconds":{"shape":"EventDurationInSeconds"}
}
},
"InferenceExecutionStatus":{
"type":"string",
"enum":[
@@ -1061,6 +1096,28 @@
"DatasetSummaries":{"shape":"DatasetSummaries"}
}
},
"ListInferenceEventsRequest":{
"type":"structure",
"required":[
"InferenceSchedulerName",
"IntervalStartTime",
"IntervalEndTime"
],
"members":{
"NextToken":{"shape":"NextToken"},
"MaxResults":{"shape":"MaxResults"},
"InferenceSchedulerName":{"shape":"InferenceSchedulerIdentifier"},
"IntervalStartTime":{"shape":"Timestamp"},
"IntervalEndTime":{"shape":"Timestamp"}
}
},
"ListInferenceEventsResponse":{
"type":"structure",
"members":{
"NextToken":{"shape":"NextToken"},
"InferenceEventSummaries":{"shape":"InferenceEventSummaries"}
}
},
"ListInferenceExecutionsRequest":{
"type":"structure",
"required":["InferenceSchedulerName"],
@@ -1285,7 +1342,7 @@
"type":"string",
"max":1024,
"min":0,
"pattern":"(^$)|([\\P{M}\\p{M}]{1,1023}/$)"
"pattern":"(^$)|([\\u0009\\u000A\\u000D\\u0020-\\u00FF]{1,1023}/$)"
},
"SensorName":{
"type":"string",
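The new `ListInferenceEvents` operation defined above takes a required inference scheduler name plus a start/end interval and returns `InferenceEventSummary` records. As a rough sketch (not part of this commit), a one-shot call through aws-sdk-go v1 might look like the following; the input/output type names assume the SDK's usual codegen conventions for these shapes, and `my-scheduler` is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lookoutequipment"
)

func main() {
	// Shared session using the default credential and region resolution chain.
	sess := session.Must(session.NewSession())
	svc := lookoutequipment.New(sess)

	// Ask for inference events from the last 24 hours for one scheduler.
	end := time.Now()
	start := end.Add(-24 * time.Hour)

	out, err := svc.ListInferenceEvents(&lookoutequipment.ListInferenceEventsInput{
		InferenceSchedulerName: aws.String("my-scheduler"), // placeholder scheduler name
		IntervalStartTime:      aws.Time(start),
		IntervalEndTime:        aws.Time(end),
		MaxResults:             aws.Int64(50),
	})
	if err != nil {
		log.Fatalf("ListInferenceEvents failed: %v", err)
	}

	for _, ev := range out.InferenceEventSummaries {
		fmt.Printf("%s: %v -> %v (%d s)\n",
			aws.StringValue(ev.InferenceSchedulerName),
			aws.TimeValue(ev.EventStartTime),
			aws.TimeValue(ev.EventEndTime),
			aws.Int64Value(ev.EventDurationInSeconds))
	}
}
```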
44 changes: 42 additions & 2 deletions models/apis/lookoutequipment/2020-12-15/docs-2.json
@@ -14,6 +14,7 @@
"DescribeModel": "<p>Provides a JSON containing the overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on. </p>",
"ListDataIngestionJobs": "<p>Provides a list of all data ingestion jobs, including dataset name and ARN, S3 location of the input data, status, and so on. </p>",
"ListDatasets": "<p>Lists all datasets currently available in your account, filtering on the dataset name. </p>",
"ListInferenceEvents": "<p> Lists all inference events that have been found for the specified inference scheduler. </p>",
"ListInferenceExecutions": "<p> Lists all inference executions that have been performed by the specified inference scheduler. </p>",
"ListInferenceSchedulers": "<p>Retrieves a list of all inference schedulers currently available for your account. </p>",
"ListModels": "<p>Generates a list of all models in the account, including model name and ARN, dataset, and status. </p>",
@@ -299,6 +300,12 @@
"DataQualitySummary$DuplicateTimestamps": "<p> Parameter that gives information about duplicate timestamps in the input data. </p>"
}
},
"EventDurationInSeconds": {
"base": null,
"refs": {
"InferenceEventSummary$EventDurationInSeconds": "<p> Indicates the size of an inference event in seconds. </p>"
}
},
"FileNameTimestampFormat": {
"base": null,
"refs": {
@@ -333,6 +340,18 @@
"StartDataIngestionJobRequest$ClientToken": "<p> A unique identifier for the request. If you do not set the client request token, Amazon Lookout for Equipment generates one. </p>"
}
},
"InferenceEventSummaries": {
"base": null,
"refs": {
"ListInferenceEventsResponse$InferenceEventSummaries": "<p>Provides an array of information about the individual inference events returned from the <code>ListInferenceEvents</code> operation, including scheduler used, event start time, event end time, diagnostics, and so on. </p>"
}
},
"InferenceEventSummary": {
"base": "<p>Contains information about the specific inference event, including start and end time, diagnostics information, event duration and so on.</p>",
"refs": {
"InferenceEventSummaries$member": null
}
},
"InferenceExecutionStatus": {
"base": null,
"refs": {
@@ -393,6 +412,7 @@
"refs": {
"CreateInferenceSchedulerResponse$InferenceSchedulerArn": "<p>The Amazon Resource Name (ARN) of the inference scheduler being created. </p>",
"DescribeInferenceSchedulerResponse$InferenceSchedulerArn": "<p>The Amazon Resource Name (ARN) of the inference scheduler being described. </p>",
"InferenceEventSummary$InferenceSchedulerArn": "<p> The Amazon Resource Name (ARN) of the inference scheduler being used for the inference event. </p>",
"InferenceExecutionSummary$InferenceSchedulerArn": "<p> The Amazon Resource Name (ARN) of the inference scheduler being used for the inference execution. </p>",
"InferenceSchedulerSummary$InferenceSchedulerArn": "<p> The Amazon Resource Name (ARN) of the inference scheduler. </p>",
"StartInferenceSchedulerResponse$InferenceSchedulerArn": "<p>The Amazon Resource Name (ARN) of the inference scheduler being started. </p>",
@@ -404,6 +424,7 @@
"refs": {
"DeleteInferenceSchedulerRequest$InferenceSchedulerName": "<p>The name of the inference scheduler to be deleted. </p>",
"DescribeInferenceSchedulerRequest$InferenceSchedulerName": "<p>The name of the inference scheduler being described. </p>",
"ListInferenceEventsRequest$InferenceSchedulerName": "<p>The name of the inference scheduler for the inference events listed. </p>",
"ListInferenceExecutionsRequest$InferenceSchedulerName": "<p>The name of the inference scheduler for the inference execution listed. </p>",
"ListInferenceSchedulersRequest$InferenceSchedulerNameBeginsWith": "<p>The beginning of the name of the inference schedulers to be listed. </p>",
"StartInferenceSchedulerRequest$InferenceSchedulerName": "<p>The name of the inference scheduler to be started. </p>",
@@ -417,6 +438,7 @@
"CreateInferenceSchedulerRequest$InferenceSchedulerName": "<p>The name of the inference scheduler being created. </p>",
"CreateInferenceSchedulerResponse$InferenceSchedulerName": "<p>The name of inference scheduler being created. </p>",
"DescribeInferenceSchedulerResponse$InferenceSchedulerName": "<p>The name of the inference scheduler being described. </p>",
"InferenceEventSummary$InferenceSchedulerName": "<p>The name of the inference scheduler being used for the inference events. </p>",
"InferenceExecutionSummary$InferenceSchedulerName": "<p>The name of the inference scheduler being used for the inference execution. </p>",
"InferenceSchedulerSummary$InferenceSchedulerName": "<p>The name of the inference scheduler. </p>",
"StartInferenceSchedulerResponse$InferenceSchedulerName": "<p>The name of the inference scheduler being started. </p>",
@@ -583,6 +605,16 @@
"refs": {
}
},
"ListInferenceEventsRequest": {
"base": null,
"refs": {
}
},
"ListInferenceEventsResponse": {
"base": null,
"refs": {
}
},
"ListInferenceExecutionsRequest": {
"base": null,
"refs": {
@@ -644,6 +676,7 @@
"refs": {
"ListDataIngestionJobsRequest$MaxResults": "<p> Specifies the maximum number of data ingestion jobs to list. </p>",
"ListDatasetsRequest$MaxResults": "<p> Specifies the maximum number of datasets to list. </p>",
"ListInferenceEventsRequest$MaxResults": "<p>Specifies the maximum number of inference events to list. </p>",
"ListInferenceExecutionsRequest$MaxResults": "<p>Specifies the maximum number of inference executions to list. </p>",
"ListInferenceSchedulersRequest$MaxResults": "<p> Specifies the maximum number of inference schedulers to list. </p>",
"ListModelsRequest$MaxResults": "<p> Specifies the maximum number of ML models to list. </p>",
@@ -678,7 +711,8 @@
"ModelMetrics": {
"base": null,
"refs": {
"DescribeModelResponse$ModelMetrics": "<p>The Model Metrics show an aggregated summary of the model's performance within the evaluation time range. This is the JSON content of the metrics created when evaluating the model. </p>"
"DescribeModelResponse$ModelMetrics": "<p>The Model Metrics show an aggregated summary of the model's performance within the evaluation time range. This is the JSON content of the metrics created when evaluating the model. </p>",
"InferenceEventSummary$Diagnostics": "<p> An array which specifies the names and values of all sensors contributing to an inference event.</p>"
}
},
"ModelName": {
@@ -754,6 +788,8 @@
"ListDataIngestionJobsResponse$NextToken": "<p> An opaque pagination token indicating where to continue the listing of data ingestion jobs. </p>",
"ListDatasetsRequest$NextToken": "<p> An opaque pagination token indicating where to continue the listing of datasets. </p>",
"ListDatasetsResponse$NextToken": "<p> An opaque pagination token indicating where to continue the listing of datasets. </p>",
"ListInferenceEventsRequest$NextToken": "<p>An opaque pagination token indicating where to continue the listing of inference events.</p>",
"ListInferenceEventsResponse$NextToken": "<p>An opaque pagination token indicating where to continue the listing of inference executions. </p>",
"ListInferenceExecutionsRequest$NextToken": "<p>An opaque pagination token indicating where to continue the listing of inference executions.</p>",
"ListInferenceExecutionsResponse$NextToken": "<p> An opaque pagination token indicating where to continue the listing of inference executions. </p>",
"ListInferenceSchedulersRequest$NextToken": "<p> An opaque pagination token indicating where to continue the listing of inference schedulers. </p>",
@@ -949,7 +985,7 @@
"DescribeDataIngestionJobResponse$CreatedAt": "<p>The time at which the data ingestion job was created. </p>",
"DescribeDataIngestionJobResponse$DataStartTime": "<p> Indicates the earliest timestamp corresponding to data that was successfully ingested during this specific ingestion job. </p>",
"DescribeDataIngestionJobResponse$DataEndTime": "<p> Indicates the latest timestamp corresponding to data that was successfully ingested during this specific ingestion job. </p>",
"DescribeDatasetResponse$CreatedAt": "<p>Specifies the time the dataset was created in Amazon Lookout for Equipment. </p>",
"DescribeDatasetResponse$CreatedAt": "<p>Specifies the time the dataset was created in Lookout for Equipment. </p>",
"DescribeDatasetResponse$LastUpdatedAt": "<p>Specifies the time the dataset was last updated, if it was. </p>",
"DescribeDatasetResponse$DataStartTime": "<p> Indicates the earliest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset. </p>",
"DescribeDatasetResponse$DataEndTime": "<p> Indicates the latest timestamp corresponding to data that was successfully ingested during the most recent ingestion of this particular dataset. </p>",
@@ -963,9 +999,13 @@
"DescribeModelResponse$TrainingExecutionEndTime": "<p>Indicates the time at which the training of the ML model was completed. </p>",
"DescribeModelResponse$LastUpdatedTime": "<p>Indicates the last time the ML model was updated. The type of update is not specified. </p>",
"DescribeModelResponse$CreatedAt": "<p>Indicates the time and date at which the ML model was created. </p>",
"InferenceEventSummary$EventStartTime": "<p>Indicates the starting time of an inference event. </p>",
"InferenceEventSummary$EventEndTime": "<p>Indicates the ending time of an inference event. </p>",
"InferenceExecutionSummary$ScheduledStartTime": "<p>Indicates the start time at which the inference scheduler began the specific inference execution. </p>",
"InferenceExecutionSummary$DataStartTime": "<p>Indicates the time reference in the dataset at which the inference execution began. </p>",
"InferenceExecutionSummary$DataEndTime": "<p>Indicates the time reference in the dataset at which the inference execution stopped. </p>",
"ListInferenceEventsRequest$IntervalStartTime": "<p> Lookout for Equipment will return all the inference events with start time equal to or greater than the start time given.</p>",
"ListInferenceEventsRequest$IntervalEndTime": "<p>Lookout for Equipment will return all the inference events with end time equal to or less than the end time given.</p>",
"ListInferenceExecutionsRequest$DataStartTimeAfter": "<p>The time reference in the inferenced dataset after which Amazon Lookout for Equipment started the inference execution. </p>",
"ListInferenceExecutionsRequest$DataEndTimeBefore": "<p>The time reference in the inferenced dataset before which Amazon Lookout for Equipment stopped the inference execution. </p>",
"ModelSummary$CreatedAt": "<p>The time at which the specific model was created. </p>",
5 changes: 5 additions & 0 deletions models/apis/lookoutequipment/2020-12-15/paginators-1.json
@@ -10,6 +10,11 @@
"output_token": "NextToken",
"limit_key": "MaxResults"
},
"ListInferenceEvents": {
"input_token": "NextToken",
"output_token": "NextToken",
"limit_key": "MaxResults"
},
"ListInferenceExecutions": {
"input_token": "NextToken",
"output_token": "NextToken",
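Because `ListInferenceEvents` is registered in paginators-1.json above with `NextToken`/`MaxResults`, the generated client should also expose a paginator helper. Below is a hedged sketch of walking every page, assuming the conventional aws-sdk-go v1 method name `ListInferenceEventsPages` and client type `lookoutequipment.LookoutEquipment`; `my-scheduler` is again a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lookoutequipment"
)

// listAllInferenceEvents walks every page of ListInferenceEvents results for
// one scheduler and interval instead of handling NextToken by hand.
func listAllInferenceEvents(svc *lookoutequipment.LookoutEquipment, scheduler string, start, end time.Time) error {
	input := &lookoutequipment.ListInferenceEventsInput{
		InferenceSchedulerName: aws.String(scheduler),
		IntervalStartTime:      aws.Time(start),
		IntervalEndTime:        aws.Time(end),
	}
	return svc.ListInferenceEventsPages(input,
		func(page *lookoutequipment.ListInferenceEventsOutput, lastPage bool) bool {
			for _, ev := range page.InferenceEventSummaries {
				fmt.Println(aws.TimeValue(ev.EventStartTime), "->", aws.TimeValue(ev.EventEndTime))
			}
			return true // keep paging until the service stops returning NextToken
		})
}

func main() {
	svc := lookoutequipment.New(session.Must(session.NewSession()))
	end := time.Now()
	if err := listAllInferenceEvents(svc, "my-scheduler", end.Add(-24*time.Hour), end); err != nil {
		log.Fatal(err)
	}
}
```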
6 changes: 3 additions & 3 deletions models/apis/mediaconvert/2017-08-29/docs-2.json
@@ -1059,7 +1059,7 @@
}
},
"DolbyVision": {
"base": "With AWS Elemental MediaConvert, you can create profile 5 or 8.1 Dolby Vision outputs from MXF and IMF sources.",
"base": "Create Dolby Vision Profile 5 or Profile 8.1 compatible video output.",
"refs": {
"VideoPreprocessor$DolbyVision": "Enable Dolby Vision feature to produce Dolby Vision compatible video output."
}
@@ -1083,9 +1083,9 @@
}
},
"DolbyVisionProfile": {
"base": "Required when you use Dolby Vision processing. Set Profile to Profile 5 to only include frame-interleaved Dolby Vision metadata in your output. Set Profile to Profile 8.1 to include both frame-interleaved Dolby Vision metadata and HDR10 metadata in your output.",
"base": "Required when you enable Dolby Vision. Use Profile 5 to include frame-interleaved Dolby Vision metadata in your output. Your input must include Dolby Vision metadata or an HDR10 YUV color space. Use Profile 8.1 to include frame-interleaved Dolby Vision metadata and HDR10 metadata in your output. Your input must include Dolby Vision metadata.",
"refs": {
"DolbyVision$Profile": "Required when you use Dolby Vision processing. Set Profile to Profile 5 to only include frame-interleaved Dolby Vision metadata in your output. Set Profile to Profile 8.1 to include both frame-interleaved Dolby Vision metadata and HDR10 metadata in your output."
"DolbyVision$Profile": "Required when you enable Dolby Vision. Use Profile 5 to include frame-interleaved Dolby Vision metadata in your output. Your input must include Dolby Vision metadata or an HDR10 YUV color space. Use Profile 8.1 to include frame-interleaved Dolby Vision metadata and HDR10 metadata in your output. Your input must include Dolby Vision metadata."
}
},
"DropFrameTimecode": {
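The reworded descriptions above spell out what each Dolby Vision profile requires from the input. Purely as an illustration (none of this is in the diff), the setting might be expressed with aws-sdk-go v1 as below; the `PROFILE_8_1` / `PROFILE_5` enum strings and the placement under an output's `VideoDescription` are assumptions inferred from the documentation text.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Video preprocessor settings requesting Dolby Vision Profile 8.1 output:
	// frame-interleaved Dolby Vision metadata plus HDR10 metadata. "PROFILE_8_1"
	// (and "PROFILE_5" for Profile 5) are assumed DolbyVisionProfile enum strings.
	preprocessors := &mediaconvert.VideoPreprocessor{
		DolbyVision: &mediaconvert.DolbyVision{
			Profile: aws.String("PROFILE_8_1"),
		},
	}

	// In a real job, these settings would sit on each output's VideoDescription
	// inside the CreateJobInput passed to the MediaConvert CreateJob API.
	fmt.Println(preprocessors)
}
```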
