diff --git a/CHANGELOG.md b/CHANGELOG.md index 3dbe0ce749..f343bc8c93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +Release v1.41.3 (2021-10-14) +=== + +### Service Client Updates +* `service/autoscaling`: Updates service API and documentation + * Amazon EC2 Auto Scaling now supports filtering describe Auto Scaling groups API using tags +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `service/robomaker`: Updates service API and documentation +* `service/sagemaker`: Updates service API and documentation + * This release updates the provisioning artifact ID to an optional parameter in CreateProject API. The provisioning artifact ID defaults to the latest provisioning artifact ID of the product if you don't provide one. + Release v1.41.2 (2021-10-13) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index abccd657cb..ebd0e4c97f 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -10381,6 +10381,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "kendra": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ diff --git a/aws/version.go b/aws/version.go index 8f5f1617e8..988001c187 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.41.2" +const SDKVersion = "1.41.3" diff --git a/models/apis/autoscaling/2011-01-01/api-2.json b/models/apis/autoscaling/2011-01-01/api-2.json index fc56d7eaea..344c181fe5 100644 --- a/models/apis/autoscaling/2011-01-01/api-2.json +++ b/models/apis/autoscaling/2011-01-01/api-2.json @@ -411,7 +411,8 @@ "resultWrapper":"DescribeLoadBalancerTargetGroupsResult" }, "errors":[ - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"InvalidNextToken"} ] }, "DescribeLoadBalancers":{ @@ -426,7 +427,8 @@ "resultWrapper":"DescribeLoadBalancersResult" }, "errors":[ - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"InvalidNextToken"} ] }, "DescribeMetricCollectionTypes":{ @@ -1090,7 +1092,8 @@ "members":{ "AutoScalingGroupNames":{"shape":"AutoScalingGroupNames"}, "NextToken":{"shape":"XmlString"}, - "MaxRecords":{"shape":"MaxRecords"} + "MaxRecords":{"shape":"MaxRecords"}, + "Filters":{"shape":"Filters"} } }, "AutoScalingGroupPredictedCapacity":{"type":"integer"}, diff --git a/models/apis/autoscaling/2011-01-01/docs-2.json b/models/apis/autoscaling/2011-01-01/docs-2.json index 5a4444295b..5dc088de5b 100644 --- a/models/apis/autoscaling/2011-01-01/docs-2.json +++ b/models/apis/autoscaling/2011-01-01/docs-2.json @@ -20,9 +20,9 @@ "DeleteScheduledAction": "

Deletes the specified scheduled action.

", "DeleteTags": "

Deletes the specified tags.

", "DeleteWarmPool": "

Deletes the warm pool for the specified Auto Scaling group.

For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.

", - "DescribeAccountLimits": "

Describes the current Amazon EC2 Auto Scaling resource quotas for your account.

When you establish an account, the account has initial quotas on the maximum number of Auto Scaling groups and launch configurations that you can create in a given Region. For more information, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

", + "DescribeAccountLimits": "

Describes the current Amazon EC2 Auto Scaling resource quotas for your account.

When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of Auto Scaling groups and launch configurations that you can create in a given Region. For more information, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.

", "DescribeAdjustmentTypes": "

Describes the available adjustment types for step scaling and simple scaling policies.

The following adjustment types are supported:

", - "DescribeAutoScalingGroups": "

Gets information about the Auto Scaling groups in the account and Region.

This operation returns information about instances in Auto Scaling groups. To retrieve information about the instances in a warm pool, you must call the DescribeWarmPool API.

", + "DescribeAutoScalingGroups": "

Gets information about the Auto Scaling groups in the account and Region.

If you specify Auto Scaling group names, the output includes information for only the specified Auto Scaling groups. If you specify filters, the output includes information for only those Auto Scaling groups that meet the filter criteria. If you do not specify group names or filters, the output includes information for all Auto Scaling groups.

This operation also returns information about instances in Auto Scaling groups. To retrieve information about the instances in a warm pool, you must call the DescribeWarmPool API.

", "DescribeAutoScalingInstances": "

Gets information about the Auto Scaling instances in the account and Region.

", "DescribeAutoScalingNotificationTypes": "

Describes the notification types that are supported by Amazon EC2 Auto Scaling.

", "DescribeInstanceRefreshes": "

Gets information about the instance refreshes for the specified Auto Scaling group.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

To help you determine the status of an instance refresh, this operation returns information about the instance refreshes you previously initiated, including their status, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete.

The following are the possible statuses:

", @@ -313,7 +313,7 @@ "BlockDeviceEbsEncrypted": { "base": null, "refs": { - "Ebs$Encrypted": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported Instance Types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot specify an encryption value. Volumes that are created from encrypted snapshots are automatically encrypted, and volumes that are created from unencrypted snapshots are automatically unencrypted. By default, encrypted snapshots use the Amazon Web Services managed CMK that is used for EBS encryption, but you can specify a custom CMK when you create the snapshot. The ability to encrypt a snapshot during copying also allows you to apply a new CMK to an already-encrypted snapshot. Volumes restored from the resulting copy are only accessible using the new CMK.

Enabling encryption by default results in all EBS volumes being encrypted with the Amazon Web Services managed CMK or a customer managed CMK, whether or not the snapshot was encrypted.

For more information, see Using Encryption with EBS-Backed AMIs in the Amazon EC2 User Guide for Linux Instances and Required CMK key policy for use with encrypted volumes in the Amazon EC2 Auto Scaling User Guide.

" + "Ebs$Encrypted": "

Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see Supported instance types. If your AMI uses encrypted volumes, you can also only launch it on supported instance types.

If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.

If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the Amazon Web Services managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.

For more information, see Using Amazon Web Services KMS keys to encrypt Amazon EBS volumes in the Amazon EC2 Auto Scaling User Guide.

" } }, "BlockDeviceEbsIops": { @@ -337,7 +337,7 @@ "BlockDeviceEbsVolumeType": { "base": null, "refs": { - "Ebs$VolumeType": "

The volume type. For more information, see Amazon EBS Volume Types in the Amazon EC2 User Guide for Linux Instances.

Valid Values: standard | io1 | gp2 | st1 | sc1 | gp3

" + "Ebs$VolumeType": "

The volume type. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide for Linux Instances.

Valid Values: standard | io1 | gp2 | st1 | sc1 | gp3

" } }, "BlockDeviceMapping": { @@ -662,7 +662,7 @@ "EbsOptimized": { "base": null, "refs": { - "CreateLaunchConfigurationType$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

", + "CreateLaunchConfigurationType$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see Amazon EBS-optimized instances in the Amazon EC2 User Guide for Linux Instances.

The default value is false.

", "LaunchConfiguration$EbsOptimized": "

Specifies whether the launch configuration is optimized for EBS I/O (true) or not (false). For more information, see Amazon EBS-Optimized Instances in the Amazon EC2 User Guide for Linux Instances.

" } }, @@ -729,7 +729,7 @@ } }, "Filter": { - "base": "

Describes a filter that is used to return a more specific list of results when describing tags.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

", + "base": "

Describes a filter that is used to return a more specific list of results from a describe operation.

If you specify multiple filters, the filters are joined with an AND, and the request returns only results that match all of the specified filters.

For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.

", "refs": { "Filters$member": null } @@ -737,6 +737,7 @@ "Filters": { "base": null, "refs": { + "AutoScalingGroupNamesType$Filters": "

One or more filters to limit the results based on specific tags.

", "DescribeTagsType$Filters": "

One or more filters to scope the tags to return. The maximum number of filters per filter type (for example, auto-scaling-group) is 1000.

" } }, @@ -1891,7 +1892,7 @@ "Values": { "base": null, "refs": { - "Filter$Values": "

One or more filter values. Filter values are case-sensitive.

" + "Filter$Values": "

One or more filter values. Filter values are case-sensitive.

If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values. For example, specify \"tag:environment\" for the filter name and \"production,development\" for the filter values to find Auto Scaling groups with the tag \"environment=production\" or \"environment=development\".

" } }, "WarmPoolConfiguration": { @@ -1954,7 +1955,7 @@ "DescribeWarmPoolAnswer$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", "DescribeWarmPoolType$NextToken": "

The token for the next set of instances to return. (You received this token from a previous call.)

", "FailedScheduledUpdateGroupActionRequest$ErrorMessage": "

The error message accompanying the error code.

", - "Filter$Name": "

The name of the filter. The valid values are: auto-scaling-group, key, value, and propagate-at-launch.

", + "Filter$Name": "

The name of the filter.

The valid values for Name depend on which API operation you are including the filter in: DescribeAutoScalingGroups or DescribeTags.

DescribeAutoScalingGroups

Valid values for Name include the following: tag-key, tag-value, and tag:<key>.

DescribeTags

Valid values for Name include the following: auto-scaling-group, key, value, and propagate-at-launch.

", "InstancesDistribution$OnDemandAllocationStrategy": "

Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.

", "InstancesDistribution$SpotAllocationStrategy": "

Indicates how to allocate instances across Spot Instance pools.

If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified.

If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first.

", "LaunchConfigurationNamesType$NextToken": "

The token for the next set of items to return. (You received this token from a previous call.)

", diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json index 352ba30977..c743df4b06 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/api-2.json @@ -1147,7 +1147,8 @@ "members":{ "Names":{"shape":"SslPolicyNames"}, "Marker":{"shape":"Marker"}, - "PageSize":{"shape":"PageSize"} + "PageSize":{"shape":"PageSize"}, + "LoadBalancerType":{"shape":"LoadBalancerTypeEnum"} } }, "DescribeSSLPoliciesOutput":{ @@ -2017,7 +2018,8 @@ "members":{ "SslProtocols":{"shape":"SslProtocols"}, "Ciphers":{"shape":"Ciphers"}, - "Name":{"shape":"SslPolicyName"} + "Name":{"shape":"SslPolicyName"}, + "SupportedLoadBalancerTypes":{"shape":"ListOfString"} } }, "SslPolicyName":{"type":"string"}, diff --git a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json index 4044b89054..8c9e7a0a07 100644 --- a/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json +++ b/models/apis/elasticloadbalancingv2/2015-12-01/docs-2.json @@ -808,7 +808,8 @@ "HttpRequestMethodConditionConfig$Values": "

The name of the request method. The maximum size is 40 characters. The allowed characters are A-Z, hyphen (-), and underscore (_). The comparison is case sensitive. Wildcards are not supported; therefore, the method name must be an exact match.

If you specify multiple strings, the condition is satisfied if one of the strings matches the HTTP request method. We recommend that you route GET and HEAD requests in the same way, because the response to a HEAD request may be cached.

", "PathPatternConditionConfig$Values": "

One or more path patterns to compare against the request URL. The maximum size of each string is 128 characters. The comparison is case sensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).

If you specify multiple strings, the condition is satisfied if one of them matches the request URL. The path pattern is compared only to the path of the URL, not to its query string. To compare against the query string, use QueryStringConditionConfig.

", "RuleCondition$Values": "

The condition value. Specify only when Field is host-header or path-pattern. Alternatively, to specify multiple host names or multiple path patterns, use HostHeaderConfig or PathPatternConfig.

If Field is host-header and you are not using HostHeaderConfig, you can specify a single host name (for example, my.example.com) in Values. A host name is case insensitive, can be up to 128 characters in length, and can contain any of the following characters.

If Field is path-pattern and you are not using PathPatternConfig, you can specify a single path pattern (for example, /img/*) in Values. A path pattern is case-sensitive, can be up to 128 characters in length, and can contain any of the following characters.

", - "SourceIpConditionConfig$Values": "

One or more source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

" + "SourceIpConditionConfig$Values": "

One or more source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

", + "SslPolicy$SupportedLoadBalancerTypes": "

The supported load balancers.

" } }, "Listener": { @@ -960,6 +961,7 @@ "base": null, "refs": { "CreateLoadBalancerInput$Type": "

The type of load balancer. The default is application.

", + "DescribeSSLPoliciesInput$LoadBalancerType": "

The type of load balancer. The default lists the SSL policies for all load balancers.

", "LoadBalancer$Type": "

The type of load balancer.

" } }, diff --git a/models/apis/robomaker/2018-06-29/api-2.json b/models/apis/robomaker/2018-06-29/api-2.json index d1b865b965..d9e910612c 100644 --- a/models/apis/robomaker/2018-06-29/api-2.json +++ b/models/apis/robomaker/2018-06-29/api-2.json @@ -1012,18 +1012,33 @@ "min":1, "pattern":"[a-zA-Z0-9_.\\-]*" }, + "CommandList":{ + "type":"list", + "member":{"shape":"NonEmptyString"} + }, "Compute":{ "type":"structure", "members":{ - "simulationUnitLimit":{"shape":"SimulationUnit"} + "simulationUnitLimit":{"shape":"SimulationUnit"}, + "computeType":{"shape":"ComputeType"}, + "gpuUnitLimit":{"shape":"GPUUnit"} } }, "ComputeResponse":{ "type":"structure", "members":{ - "simulationUnitLimit":{"shape":"SimulationUnit"} + "simulationUnitLimit":{"shape":"SimulationUnit"}, + "computeType":{"shape":"ComputeType"}, + "gpuUnitLimit":{"shape":"GPUUnit"} } }, + "ComputeType":{ + "type":"string", + "enum":[ + "CPU", + "GPU_AND_CPU" + ] + }, "ConcurrentDeploymentException":{ "type":"structure", "members":{ @@ -1356,7 +1371,9 @@ "members":{ "name":{"shape":"Name"}, "s3Bucket":{"shape":"S3Bucket"}, - "s3Keys":{"shape":"S3KeyOutputs"} + "s3Keys":{"shape":"S3KeyOutputs"}, + "type":{"shape":"DataSourceType"}, + "destination":{"shape":"Path"} } }, "DataSourceConfig":{ @@ -1369,7 +1386,9 @@ "members":{ "name":{"shape":"Name"}, "s3Bucket":{"shape":"S3Bucket"}, - "s3Keys":{"shape":"S3Keys"} + "s3Keys":{"shape":"S3KeysOrPrefixes"}, + "type":{"shape":"DataSourceType"}, + "destination":{"shape":"Path"} } }, "DataSourceConfigs":{ @@ -1382,6 +1401,14 @@ "type":"list", "member":{"shape":"Name"} }, + "DataSourceType":{ + "type":"string", + "enum":[ + "Prefix", + "Archive", + "File" + ] + }, "DataSources":{ "type":"list", "member":{"shape":"DataSource"} @@ -1930,6 +1957,11 @@ "min":0 }, "FloorplanCount":{"type":"integer"}, + "GPUUnit":{ + "type":"integer", + "max":1, + "min":0 + }, "GenericInteger":{"type":"integer"}, "GenericString":{ "type":"string", @@ -2005,16 +2037,13 @@ "LastUpdatedAt":{"type":"timestamp"}, "LaunchConfig":{ "type":"structure", - "required":[ - "packageName", - "launchFile" - ], "members":{ "packageName":{"shape":"Command"}, "launchFile":{"shape":"Command"}, "environmentVariables":{"shape":"EnvironmentVariableMap"}, "portForwardingConfig":{"shape":"PortForwardingConfig"}, - "streamUI":{"shape":"Boolean"} + "streamUI":{"shape":"Boolean"}, + "command":{"shape":"CommandList"} } }, "LimitExceededException":{ @@ -2482,7 +2511,8 @@ "type":"string", "enum":[ "ROS", - "ROS2" + "ROS2", + "General" ] }, "RobotSoftwareSuiteVersionType":{ @@ -2529,10 +2559,16 @@ "min":1, "pattern":".*" }, + "S3KeyOrPrefix":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, "S3KeyOutput":{ "type":"structure", "members":{ - "s3Key":{"shape":"S3Key"}, + "s3Key":{"shape":"S3KeyOrPrefix"}, "etag":{"shape":"S3Etag"} } }, @@ -2540,9 +2576,9 @@ "type":"list", "member":{"shape":"S3KeyOutput"} }, - "S3Keys":{ + "S3KeysOrPrefixes":{ "type":"list", - "member":{"shape":"S3Key"}, + "member":{"shape":"S3KeyOrPrefix"}, "max":100, "min":1 }, @@ -2760,7 +2796,8 @@ "status":{"shape":"SimulationJobStatus"}, "simulationApplicationNames":{"shape":"SimulationApplicationNames"}, "robotApplicationNames":{"shape":"RobotApplicationNames"}, - "dataSourceNames":{"shape":"DataSourceNames"} + "dataSourceNames":{"shape":"DataSourceNames"}, + "computeType":{"shape":"ComputeType"} } }, "SimulationJobs":{ @@ -2778,7 +2815,8 @@ "type":"string", "enum":[ "Gazebo", - "RosbagPlay" + "RosbagPlay", + "SimulationRuntime" ] }, 
"SimulationSoftwareSuiteVersionType":{ diff --git a/models/apis/robomaker/2018-06-29/docs-2.json b/models/apis/robomaker/2018-06-29/docs-2.json index 74424f29a0..f86325f192 100644 --- a/models/apis/robomaker/2018-06-29/docs-2.json +++ b/models/apis/robomaker/2018-06-29/docs-2.json @@ -333,6 +333,12 @@ "LaunchConfig$launchFile": "

The launch file name.

" } }, + "CommandList": { + "base": null, + "refs": { + "LaunchConfig$command": "

If you've specified General as the value for your RobotSoftwareSuite, you can use this field to specify a list of commands for your container image.

If you've specified SimulationRuntime as the value for your SimulationSoftwareSuite, you can use this field to specify a list of commands for your container image.

" + } + }, "Compute": { "base": "

Compute information for the simulation job.

", "refs": { @@ -348,6 +354,14 @@ "SimulationJob$compute": "

Compute information for the simulation job.

" } }, + "ComputeType": { + "base": null, + "refs": { + "Compute$computeType": "

Compute type information for the simulation job.

", + "ComputeResponse$computeType": "

Compute type response information for the simulation job.

", + "SimulationJobSummary$computeType": "

The compute type for the simulation job summary.

" + } + }, "ConcurrentDeploymentException": { "base": "

The failure percentage threshold was met.

", "refs": { @@ -532,6 +546,13 @@ "SimulationJobSummary$dataSourceNames": "

The names of the data sources.

" } }, + "DataSourceType": { + "base": null, + "refs": { + "DataSource$type": "

The data type for the data source that you're using for your container image or simulation job. You can use this field to specify whether your data source is an Archive, an Amazon S3 prefix, or a file.

If you don't specify a field, the default value is File.

", + "DataSourceConfig$type": "

The data type for the data source that you're using for your container image or simulation job. You can use this field to specify whether your data source is an Archive, an Amazon S3 prefix, or a file.

If you don't specify a field, the default value is File.

" + } + }, "DataSources": { "base": null, "refs": { @@ -915,6 +936,13 @@ "WorldCount$floorplanCount": "

The number of unique floorplans.

" } }, + "GPUUnit": { + "base": null, + "refs": { + "Compute$gpuUnitLimit": "

Compute GPU unit limit for the simulation job. It is the same as the number of GPUs allocated to the SimulationJob.

", + "ComputeResponse$gpuUnitLimit": "

Compute GPU unit limit for the simulation job. It is the same as the number of GPUs allocated to the SimulationJob.

" + } + }, "GenericInteger": { "base": null, "refs": { @@ -1283,6 +1311,7 @@ "NonEmptyString": { "base": null, "refs": { + "CommandList$member": null, "SecurityGroups$member": null, "Subnets$member": null } @@ -1336,6 +1365,8 @@ "Path": { "base": null, "refs": { + "DataSource$destination": "

The location where your files are mounted in the container image.

If you've specified the type of the data source as an Archive, you must provide an Amazon S3 object key to your archive. The object key must point to either a .zip or .tar.gz file.

If you've specified the type of the data source as a Prefix, you provide the Amazon S3 prefix that points to the files that you are using for your data source.

If you've specified the type of the data source as a File, you provide the Amazon S3 path to the file that you're using as your data source.

", + "DataSourceConfig$destination": "

The location where your files are mounted in the container image.

If you've specified the type of the data source as an Archive, you must provide an Amazon S3 object key to your archive. The object key must point to either a .zip or .tar.gz file.

If you've specified the type of the data source as a Prefix, you provide the Amazon S3 prefix that points to the files that you are using for your data source.

If you've specified the type of the data source as a File, you provide the Amazon S3 path to the file that you're using as your data source.

", "DeploymentLaunchConfig$preLaunchFile": "

The deployment pre-launch file. This file will be executed prior to the launch file.

", "DeploymentLaunchConfig$postLaunchFile": "

The deployment post-launch file. This file will be executed after the launch file.

", "UploadConfiguration$path": "

Specifies the path of the file(s) to upload. Standard Unix glob matching rules are accepted, with the addition of ** as a super asterisk. For example, specifying /var/log/**.log causes all .log files in the /var/log directory tree to be collected. For more examples, see Glob Library.

" @@ -1596,14 +1627,19 @@ "base": null, "refs": { "OutputLocation$s3Prefix": "

The S3 folder in the s3Bucket where output files will be placed.

", - "S3KeyOutput$s3Key": "

The S3 key.

", - "S3Keys$member": null, "S3Object$key": "

The key of the object.

", "Source$s3Key": "

The s3 object key.

", "SourceConfig$s3Key": "

The s3 object key.

", "TemplateLocation$s3Key": "

The list of S3 keys identifying the data source files.

" } }, + "S3KeyOrPrefix": { + "base": null, + "refs": { + "S3KeyOutput$s3Key": "

The S3 key.

", + "S3KeysOrPrefixes$member": null + } + }, "S3KeyOutput": { "base": "

Information about S3 keys.

", "refs": { @@ -1616,7 +1652,7 @@ "DataSource$s3Keys": "

The list of S3 keys identifying the data source files.

" } }, - "S3Keys": { + "S3KeysOrPrefixes": { "base": null, "refs": { "DataSourceConfig$s3Keys": "

The list of S3 keys identifying the data source files.

" @@ -1787,8 +1823,8 @@ "SimulationUnit": { "base": null, "refs": { - "Compute$simulationUnitLimit": "

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximim value provided. The default is 15.

", - "ComputeResponse$simulationUnitLimit": "

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximim value provided. The default is 15.

" + "Compute$simulationUnitLimit": "

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. The default is 15.

", + "ComputeResponse$simulationUnitLimit": "

The simulation unit limit. Your simulation is allocated CPU and memory proportional to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB of memory. You are only billed for the SU utilization you consume up to the maximum value provided. The default is 15.

" } }, "Source": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 024d6f152e..5eb4f7eb7d 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -13420,10 +13420,7 @@ }, "ServiceCatalogProvisioningDetails":{ "type":"structure", - "required":[ - "ProductId", - "ProvisioningArtifactId" - ], + "required":["ProductId"], "members":{ "ProductId":{"shape":"ServiceCatalogEntityId"}, "ProvisioningArtifactId":{"shape":"ServiceCatalogEntityId"}, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index f351485d73..f04ad8e37a 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -6138,7 +6138,7 @@ } }, "ModelArtifacts": { - "base": "

Provides information about the location that is configured for storing model artifacts.

Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model defintion that describes how to compute inferences, and other metadata.

", + "base": "

Provides information about the location that is configured for storing model artifacts.

Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.

", "refs": { "DescribeCompilationJobResponse$ModelArtifacts": "

Information about the location in Amazon S3 that has been configured for storing the model artifacts used in the compilation job.

", "DescribeTrainingJobResponse$ModelArtifacts": "

Information about the Amazon S3 location that is configured for storing model artifacts.

", @@ -8718,7 +8718,7 @@ "ServiceCatalogProvisioningDetails": { "base": "

Details that you specify to provision a service catalog product. For information about service catalog, see What is Amazon Web Services Service Catalog.

", "refs": { - "CreateProjectInput$ServiceCatalogProvisioningDetails": "

The product ID and provisioning artifact ID to provision a service catalog. For information, see What is Amazon Web Services Service Catalog.

", + "CreateProjectInput$ServiceCatalogProvisioningDetails": "

The product ID and provisioning artifact ID to provision a service catalog. The provisioning artifact ID defaults to the latest provisioning artifact ID of the product if you don't provide one. For more information, see What is Amazon Web Services Service Catalog.

", "DescribeProjectOutput$ServiceCatalogProvisioningDetails": "

Information used to provision a service catalog product. For information, see What is Amazon Web Services Service Catalog.

", "Project$ServiceCatalogProvisioningDetails": null } diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index c56eb81f76..7ca19728be 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -10016,6 +10016,11 @@ "us-gov-west-1" : { } } }, + "kendra" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "kinesis" : { "endpoints" : { "us-gov-east-1" : { diff --git a/service/autoscaling/api.go b/service/autoscaling/api.go index c58864ec80..a09645f1d0 100644 --- a/service/autoscaling/api.go +++ b/service/autoscaling/api.go @@ -1751,10 +1751,10 @@ func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsI // // Describes the current Amazon EC2 Auto Scaling resource quotas for your account. // -// When you establish an account, the account has initial quotas on the maximum -// number of Auto Scaling groups and launch configurations that you can create -// in a given Region. For more information, see Amazon EC2 Auto Scaling service -// quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum number of Auto Scaling groups and launch configurations +// that you can create in a given Region. For more information, see Amazon EC2 +// Auto Scaling service quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1932,7 +1932,13 @@ func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalin // // Gets information about the Auto Scaling groups in the account and Region. // -// This operation returns information about instances in Auto Scaling groups. +// If you specify Auto Scaling group names, the output includes information +// for only the specified Auto Scaling groups. If you specify filters, the output +// includes information for only those Auto Scaling groups that meet the filter +// criteria. If you do not specify group names or filters, the output includes +// information for all Auto Scaling groups. +// +// This operation also returns information about instances in Auto Scaling groups. // To retrieve information about the instances in a warm pool, you must call // the DescribeWarmPool API. // @@ -2746,6 +2752,9 @@ func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoa // You already have a pending update to an Amazon EC2 Auto Scaling resource // (for example, an Auto Scaling group, instance, or load balancer). // +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The NextToken value is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancerTargetGroups func (c *AutoScaling) DescribeLoadBalancerTargetGroups(input *DescribeLoadBalancerTargetGroupsInput) (*DescribeLoadBalancerTargetGroupsOutput, error) { req, out := c.DescribeLoadBalancerTargetGroupsRequest(input) @@ -2853,6 +2862,9 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI // You already have a pending update to an Amazon EC2 Auto Scaling resource // (for example, an Auto Scaling group, instance, or load balancer). // +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The NextToken value is not valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeLoadBalancers func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { req, out := c.DescribeLoadBalancersRequest(input) @@ -7684,8 +7696,8 @@ type CreateLaunchConfigurationInput struct { // EBS and an optimized configuration stack to provide optimal I/O performance. // This optimization is not available with all instance types. Additional fees // are incurred when you enable EBS optimization for an instance type that is - // not EBS-optimized by default. For more information, see Amazon EBS-Optimized - // Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // not EBS-optimized by default. For more information, see Amazon EBS-optimized + // instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) // in the Amazon EC2 User Guide for Linux Instances. // // The default value is false. @@ -9036,6 +9048,9 @@ type DescribeAutoScalingGroupsInput struct { // If you omit this parameter, all Auto Scaling groups are described. AutoScalingGroupNames []*string `type:"list"` + // One or more filters to limit the results based on specific tags. + Filters []*Filter `type:"list"` + // The maximum number of items to return with this call. The default value is // 50 and the maximum value is 100. MaxRecords *int64 `type:"integer"` @@ -9069,6 +9084,12 @@ func (s *DescribeAutoScalingGroupsInput) SetAutoScalingGroupNames(v []*string) * return s } +// SetFilters sets the Filters field's value. +func (s *DescribeAutoScalingGroupsInput) SetFilters(v []*Filter) *DescribeAutoScalingGroupsInput { + s.Filters = v + return s +} + // SetMaxRecords sets the MaxRecords field's value. func (s *DescribeAutoScalingGroupsInput) SetMaxRecords(v int64) *DescribeAutoScalingGroupsInput { s.MaxRecords = &v @@ -11227,27 +11248,20 @@ type Ebs struct { // Specifies whether the volume should be encrypted. Encrypted EBS volumes can // only be attached to instances that support Amazon EBS encryption. For more - // information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // If your AMI uses encrypted volumes, you can also only launch it on supported // instance types. // - // If you are creating a volume from a snapshot, you cannot specify an encryption - // value. Volumes that are created from encrypted snapshots are automatically - // encrypted, and volumes that are created from unencrypted snapshots are automatically - // unencrypted. By default, encrypted snapshots use the Amazon Web Services - // managed CMK that is used for EBS encryption, but you can specify a custom - // CMK when you create the snapshot. The ability to encrypt a snapshot during - // copying also allows you to apply a new CMK to an already-encrypted snapshot. - // Volumes restored from the resulting copy are only accessible using the new - // CMK. - // - // Enabling encryption by default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) - // results in all EBS volumes being encrypted with the Amazon Web Services managed - // CMK or a customer managed CMK, whether or not the snapshot was encrypted. 
- // - // For more information, see Using Encryption with EBS-Backed AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html) - // in the Amazon EC2 User Guide for Linux Instances and Required CMK key policy - // for use with encrypted volumes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/key-policy-requirements-EBS-encryption.html) + // If you are creating a volume from a snapshot, you cannot create an unencrypted + // volume from an encrypted snapshot. Also, you cannot specify a KMS key ID + // when using a launch configuration. + // + // If you enable encryption by default, the EBS volumes that you create are + // always encrypted, either using the Amazon Web Services managed KMS key or + // a customer-managed KMS key, regardless of whether the snapshot was encrypted. + // + // For more information, see Using Amazon Web Services KMS keys to encrypt Amazon + // EBS volumes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) // in the Amazon EC2 Auto Scaling User Guide. Encrypted *bool `type:"boolean"` @@ -11295,7 +11309,7 @@ type Ebs struct { // the size of the snapshot. VolumeSize *int64 `min:"1" type:"integer"` - // The volume type. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // // Valid Values: standard | io1 | gp2 | st1 | sc1 | gp3 @@ -11985,18 +11999,62 @@ func (s *FailedScheduledUpdateGroupActionRequest) SetScheduledActionName(v strin } // Describes a filter that is used to return a more specific list of results -// when describing tags. +// from a describe operation. +// +// If you specify multiple filters, the filters are joined with an AND, and +// the request returns only results that match all of the specified filters. // // For more information, see Tagging Auto Scaling groups and instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) // in the Amazon EC2 Auto Scaling User Guide. type Filter struct { _ struct{} `type:"structure"` - // The name of the filter. The valid values are: auto-scaling-group, key, value, - // and propagate-at-launch. + // The name of the filter. + // + // The valid values for Name depend on the API operation that you are including + // the filter in, DescribeAutoScalingGroups or DescribeTags. + // + // DescribeAutoScalingGroups + // + // Valid values for Name include the following: + // + // * tag-key - Accepts tag keys. The results will only include information + // about the Auto Scaling groups associated with these tag keys. + // + // * tag-value - Accepts tag values. The results will only include information + // about the Auto Scaling groups associated with these tag values. + // + // * tag: - Accepts the key/value combination of the tag. Use the tag + // key in the filter name and the tag value as the filter value. The results + // will only include information about the Auto Scaling groups associated + // with the specified key/value combination. + // + // DescribeTags + // + // Valid values for Name include the following: + // + // * auto-scaling-group - Accepts the names of Auto Scaling groups. The results + // will only include information about the tags associated with these Auto + // Scaling groups. + // + // * key - Accepts tag keys. 
The results will only include information about + // the tags associated with these tag keys. + // + // * value - Accepts tag values. The results will only include information + // about the tags associated with these tag values. + // + // * propagate-at-launch - Accepts a boolean value, which specifies whether + // tags propagate to instances at launch. The results will only include information + // about the tags associated with the specified boolean value. Name *string `type:"string"` // One or more filter values. Filter values are case-sensitive. + // + // If you specify multiple values for a filter, the values are joined with an + // OR, and the request returns all results that match any of the specified values. + // For example, specify "tag:environment" for the filter name and "production,development" + // for the filter values to find Auto Scaling groups with the tag "environment=production" + // or "environment=development". Values []*string `type:"list"` } diff --git a/service/autoscaling/examples_test.go b/service/autoscaling/examples_test.go index 6fc8a3d4da..feb34b3491 100644 --- a/service/autoscaling/examples_test.go +++ b/service/autoscaling/examples_test.go @@ -982,6 +982,8 @@ func ExampleAutoScaling_DescribeLoadBalancerTargetGroups_shared00() { switch aerr.Code() { case autoscaling.ErrCodeResourceContentionFault: fmt.Println(autoscaling.ErrCodeResourceContentionFault, aerr.Error()) + case autoscaling.ErrCodeInvalidNextToken: + fmt.Println(autoscaling.ErrCodeInvalidNextToken, aerr.Error()) default: fmt.Println(aerr.Error()) } @@ -1012,6 +1014,8 @@ func ExampleAutoScaling_DescribeLoadBalancers_shared00() { switch aerr.Code() { case autoscaling.ErrCodeResourceContentionFault: fmt.Println(autoscaling.ErrCodeResourceContentionFault, aerr.Error()) + case autoscaling.ErrCodeInvalidNextToken: + fmt.Println(autoscaling.ErrCodeInvalidNextToken, aerr.Error()) default: fmt.Println(aerr.Error()) } diff --git a/service/elbv2/api.go b/service/elbv2/api.go index 98d35a4005..6a7d1f053c 100644 --- a/service/elbv2/api.go +++ b/service/elbv2/api.go @@ -6106,6 +6106,10 @@ func (s *DescribeRulesOutput) SetRules(v []*Rule) *DescribeRulesOutput { type DescribeSSLPoliciesInput struct { _ struct{} `type:"structure"` + // The type of load balancer. The default lists the SSL policies for all load + // balancers. + LoadBalancerType *string `type:"string" enum:"LoadBalancerTypeEnum"` + // The marker for the next set of results. (You received this marker from a // previous call.) Marker *string `type:"string"` @@ -6148,6 +6152,12 @@ func (s *DescribeSSLPoliciesInput) Validate() error { return nil } +// SetLoadBalancerType sets the LoadBalancerType field's value. +func (s *DescribeSSLPoliciesInput) SetLoadBalancerType(v string) *DescribeSSLPoliciesInput { + s.LoadBalancerType = &v + return s +} + // SetMarker sets the Marker field's value. func (s *DescribeSSLPoliciesInput) SetMarker(v string) *DescribeSSLPoliciesInput { s.Marker = &v @@ -9321,6 +9331,9 @@ type SslPolicy struct { // The protocols. SslProtocols []*string `type:"list"` + + // The supported load balancers. + SupportedLoadBalancerTypes []*string `type:"list"` } // String returns the string representation. @@ -9359,6 +9372,12 @@ func (s *SslPolicy) SetSslProtocols(v []*string) *SslPolicy { return s } +// SetSupportedLoadBalancerTypes sets the SupportedLoadBalancerTypes field's value. 
+func (s *SslPolicy) SetSupportedLoadBalancerTypes(v []*string) *SslPolicy { + s.SupportedLoadBalancerTypes = v + return s +} + // Information about a subnet mapping. type SubnetMapping struct { _ struct{} `type:"structure"` diff --git a/service/robomaker/api.go b/service/robomaker/api.go index 27189a4b72..2d571dcb9c 100644 --- a/service/robomaker/api.go +++ b/service/robomaker/api.go @@ -6478,10 +6478,17 @@ func (s CancelWorldGenerationJobOutput) GoString() string { type Compute struct { _ struct{} `type:"structure"` + // Compute type information for the simulation job. + ComputeType *string `locationName:"computeType" type:"string" enum:"ComputeType"` + + // Compute GPU unit limit for the simulation job. It is the same as the number + // of GPUs allocated to the SimulationJob. + GpuUnitLimit *int64 `locationName:"gpuUnitLimit" type:"integer"` + // The simulation unit limit. Your simulation is allocated CPU and memory proportional // to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB // of memory. You are only billed for the SU utilization you consume up to the - // maximim value provided. The default is 15. + // maximum value provided. The default is 15. SimulationUnitLimit *int64 `locationName:"simulationUnitLimit" min:"1" type:"integer"` } @@ -6516,6 +6523,18 @@ func (s *Compute) Validate() error { return nil } +// SetComputeType sets the ComputeType field's value. +func (s *Compute) SetComputeType(v string) *Compute { + s.ComputeType = &v + return s +} + +// SetGpuUnitLimit sets the GpuUnitLimit field's value. +func (s *Compute) SetGpuUnitLimit(v int64) *Compute { + s.GpuUnitLimit = &v + return s +} + // SetSimulationUnitLimit sets the SimulationUnitLimit field's value. func (s *Compute) SetSimulationUnitLimit(v int64) *Compute { s.SimulationUnitLimit = &v @@ -6526,10 +6545,17 @@ func (s *Compute) SetSimulationUnitLimit(v int64) *Compute { type ComputeResponse struct { _ struct{} `type:"structure"` + // Compute type response information for the simulation job. + ComputeType *string `locationName:"computeType" type:"string" enum:"ComputeType"` + + // Compute GPU unit limit for the simulation job. It is the same as the number + // of GPUs allocated to the SimulationJob. + GpuUnitLimit *int64 `locationName:"gpuUnitLimit" type:"integer"` + // The simulation unit limit. Your simulation is allocated CPU and memory proportional // to the supplied simulation unit limit. A simulation unit is 1 vcpu and 2GB // of memory. You are only billed for the SU utilization you consume up to the - // maximim value provided. The default is 15. + // maximum value provided. The default is 15. SimulationUnitLimit *int64 `locationName:"simulationUnitLimit" min:"1" type:"integer"` } @@ -6551,6 +6577,18 @@ func (s ComputeResponse) GoString() string { return s.String() } +// SetComputeType sets the ComputeType field's value. +func (s *ComputeResponse) SetComputeType(v string) *ComputeResponse { + s.ComputeType = &v + return s +} + +// SetGpuUnitLimit sets the GpuUnitLimit field's value. +func (s *ComputeResponse) SetGpuUnitLimit(v int64) *ComputeResponse { + s.GpuUnitLimit = &v + return s +} + // SetSimulationUnitLimit sets the SimulationUnitLimit field's value. func (s *ComputeResponse) SetSimulationUnitLimit(v int64) *ComputeResponse { s.SimulationUnitLimit = &v @@ -9177,6 +9215,20 @@ func (s *CreateWorldTemplateOutput) SetTags(v map[string]*string) *CreateWorldTe type DataSource struct { _ struct{} `type:"structure"` + // The location where your files are mounted in the container image. 
+ // + // If you've specified the type of the data source as an Archive, you must provide + // an Amazon S3 object key to your archive. The object key must point to either + // a .zip or .tar.gz file. + // + // If you've specified the type of the data source as a Prefix, you provide + // the Amazon S3 prefix that points to the files that you are using for your + // data source. + // + // If you've specified the type of the data source as a File, you provide the + // Amazon S3 path to the file that you're using as your data source. + Destination *string `locationName:"destination" min:"1" type:"string"` + // The name of the data source. Name *string `locationName:"name" min:"1" type:"string"` @@ -9185,6 +9237,13 @@ type DataSource struct { // The list of S3 keys identifying the data source files. S3Keys []*S3KeyOutput `locationName:"s3Keys" type:"list"` + + // The data type for the data source that you're using for your container image + // or simulation job. You can use this field to specify whether your data source + // is an Archive, an Amazon S3 prefix, or a file. + // + // If you don't specify a field, the default value is File. + Type *string `locationName:"type" type:"string" enum:"DataSourceType"` } // String returns the string representation. @@ -9205,6 +9264,12 @@ func (s DataSource) GoString() string { return s.String() } +// SetDestination sets the Destination field's value. +func (s *DataSource) SetDestination(v string) *DataSource { + s.Destination = &v + return s +} + // SetName sets the Name field's value. func (s *DataSource) SetName(v string) *DataSource { s.Name = &v @@ -9223,10 +9288,30 @@ func (s *DataSource) SetS3Keys(v []*S3KeyOutput) *DataSource { return s } +// SetType sets the Type field's value. +func (s *DataSource) SetType(v string) *DataSource { + s.Type = &v + return s +} + // Information about a data source. type DataSourceConfig struct { _ struct{} `type:"structure"` + // The location where your files are mounted in the container image. + // + // If you've specified the type of the data source as an Archive, you must provide + // an Amazon S3 object key to your archive. The object key must point to either + // a .zip or .tar.gz file. + // + // If you've specified the type of the data source as a Prefix, you provide + // the Amazon S3 prefix that points to the files that you are using for your + // data source. + // + // If you've specified the type of the data source as a File, you provide the + // Amazon S3 path to the file that you're using as your data source. + Destination *string `locationName:"destination" min:"1" type:"string"` + // The name of the data source. // // Name is a required field @@ -9241,6 +9326,13 @@ type DataSourceConfig struct { // // S3Keys is a required field S3Keys []*string `locationName:"s3Keys" min:"1" type:"list" required:"true"` + + // The data type for the data source that you're using for your container image + // or simulation job. You can use this field to specify whether your data source + // is an Archive, an Amazon S3 prefix, or a file. + // + // If you don't specify a field, the default value is File. + Type *string `locationName:"type" type:"string" enum:"DataSourceType"` } // String returns the string representation. @@ -9264,6 +9356,9 @@ func (s DataSourceConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *DataSourceConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DataSourceConfig"} + if s.Destination != nil && len(*s.Destination) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -9289,6 +9384,12 @@ func (s *DataSourceConfig) Validate() error { return nil } +// SetDestination sets the Destination field's value. +func (s *DataSourceConfig) SetDestination(v string) *DataSourceConfig { + s.Destination = &v + return s +} + // SetName sets the Name field's value. func (s *DataSourceConfig) SetName(v string) *DataSourceConfig { s.Name = &v @@ -9307,6 +9408,12 @@ func (s *DataSourceConfig) SetS3Keys(v []*string) *DataSourceConfig { return s } +// SetType sets the Type field's value. +func (s *DataSourceConfig) SetType(v string) *DataSourceConfig { + s.Type = &v + return s +} + type DeleteFleetInput struct { _ struct{} `type:"structure"` @@ -12860,18 +12967,21 @@ func (s *InvalidParameterException) RequestID() string { type LaunchConfig struct { _ struct{} `type:"structure"` + // If you've specified General as the value for your RobotSoftwareSuite, you + // can use this field to specify a list of commands for your container image. + // + // If you've specified SimulationRuntime as the value for your SimulationSoftwareSuite, + // you can use this field to specify a list of commands for your container image. + Command []*string `locationName:"command" type:"list"` + // The environment variables for the application launch. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` // The launch file name. - // - // LaunchFile is a required field - LaunchFile *string `locationName:"launchFile" min:"1" type:"string" required:"true"` + LaunchFile *string `locationName:"launchFile" min:"1" type:"string"` // The package name. - // - // PackageName is a required field - PackageName *string `locationName:"packageName" min:"1" type:"string" required:"true"` + PackageName *string `locationName:"packageName" min:"1" type:"string"` // The port forwarding configuration. PortForwardingConfig *PortForwardingConfig `locationName:"portForwardingConfig" type:"structure"` @@ -12904,15 +13014,9 @@ func (s LaunchConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *LaunchConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "LaunchConfig"} - if s.LaunchFile == nil { - invalidParams.Add(request.NewErrParamRequired("LaunchFile")) - } if s.LaunchFile != nil && len(*s.LaunchFile) < 1 { invalidParams.Add(request.NewErrParamMinLen("LaunchFile", 1)) } - if s.PackageName == nil { - invalidParams.Add(request.NewErrParamRequired("PackageName")) - } if s.PackageName != nil && len(*s.PackageName) < 1 { invalidParams.Add(request.NewErrParamMinLen("PackageName", 1)) } @@ -12928,6 +13032,12 @@ func (s *LaunchConfig) Validate() error { return nil } +// SetCommand sets the Command field's value. +func (s *LaunchConfig) SetCommand(v []*string) *LaunchConfig { + s.Command = v + return s +} + // SetEnvironmentVariables sets the EnvironmentVariables field's value. func (s *LaunchConfig) SetEnvironmentVariables(v map[string]*string) *LaunchConfig { s.EnvironmentVariables = v @@ -15724,7 +15834,7 @@ type S3KeyOutput struct { Etag *string `locationName:"etag" type:"string"` // The S3 key. 
- S3Key *string `locationName:"s3Key" min:"1" type:"string"` + S3Key *string `locationName:"s3Key" type:"string"` } // String returns the string representation. @@ -16734,6 +16844,9 @@ type SimulationJobSummary struct { // The Amazon Resource Name (ARN) of the simulation job. Arn *string `locationName:"arn" min:"1" type:"string"` + // The compute type for the simulation job summary. + ComputeType *string `locationName:"computeType" type:"string" enum:"ComputeType"` + // The names of the data sources. DataSourceNames []*string `locationName:"dataSourceNames" type:"list"` @@ -16778,6 +16891,12 @@ func (s *SimulationJobSummary) SetArn(v string) *SimulationJobSummary { return s } +// SetComputeType sets the ComputeType field's value. +func (s *SimulationJobSummary) SetComputeType(v string) *SimulationJobSummary { + s.ComputeType = &v + return s +} + // SetDataSourceNames sets the DataSourceNames field's value. func (s *SimulationJobSummary) SetDataSourceNames(v []*string) *SimulationJobSummary { s.DataSourceNames = v @@ -19205,6 +19324,42 @@ func Architecture_Values() []string { } } +const ( + // ComputeTypeCpu is a ComputeType enum value + ComputeTypeCpu = "CPU" + + // ComputeTypeGpuAndCpu is a ComputeType enum value + ComputeTypeGpuAndCpu = "GPU_AND_CPU" +) + +// ComputeType_Values returns all elements of the ComputeType enum +func ComputeType_Values() []string { + return []string{ + ComputeTypeCpu, + ComputeTypeGpuAndCpu, + } +} + +const ( + // DataSourceTypePrefix is a DataSourceType enum value + DataSourceTypePrefix = "Prefix" + + // DataSourceTypeArchive is a DataSourceType enum value + DataSourceTypeArchive = "Archive" + + // DataSourceTypeFile is a DataSourceType enum value + DataSourceTypeFile = "File" +) + +// DataSourceType_Values returns all elements of the DataSourceType enum +func DataSourceType_Values() []string { + return []string{ + DataSourceTypePrefix, + DataSourceTypeArchive, + DataSourceTypeFile, + } +} + const ( // DeploymentJobErrorCodeResourceNotFound is a DeploymentJobErrorCode enum value DeploymentJobErrorCodeResourceNotFound = "ResourceNotFound" @@ -19427,6 +19582,9 @@ const ( // RobotSoftwareSuiteTypeRos2 is a RobotSoftwareSuiteType enum value RobotSoftwareSuiteTypeRos2 = "ROS2" + + // RobotSoftwareSuiteTypeGeneral is a RobotSoftwareSuiteType enum value + RobotSoftwareSuiteTypeGeneral = "General" ) // RobotSoftwareSuiteType_Values returns all elements of the RobotSoftwareSuiteType enum @@ -19434,6 +19592,7 @@ func RobotSoftwareSuiteType_Values() []string { return []string{ RobotSoftwareSuiteTypeRos, RobotSoftwareSuiteTypeRos2, + RobotSoftwareSuiteTypeGeneral, } } @@ -19739,6 +19898,9 @@ const ( // SimulationSoftwareSuiteTypeRosbagPlay is a SimulationSoftwareSuiteType enum value SimulationSoftwareSuiteTypeRosbagPlay = "RosbagPlay" + + // SimulationSoftwareSuiteTypeSimulationRuntime is a SimulationSoftwareSuiteType enum value + SimulationSoftwareSuiteTypeSimulationRuntime = "SimulationRuntime" ) // SimulationSoftwareSuiteType_Values returns all elements of the SimulationSoftwareSuiteType enum @@ -19746,6 +19908,7 @@ func SimulationSoftwareSuiteType_Values() []string { return []string{ SimulationSoftwareSuiteTypeGazebo, SimulationSoftwareSuiteTypeRosbagPlay, + SimulationSoftwareSuiteTypeSimulationRuntime, } } diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index c4d776cf71..8863861d73 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -35017,7 +35017,9 @@ type CreateProjectInput struct { ProjectName *string `min:"1" type:"string" 
required:"true"` // The product ID and provisioning artifact ID to provision a service catalog. - // For information, see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). + // The provisioning artifact ID will default to the latest provisioning artifact + // ID of the product, if you don't provide the provisioning artifact ID. For + // more information, see What is Amazon Web Services Service Catalog (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). // // ServiceCatalogProvisioningDetails is a required field ServiceCatalogProvisioningDetails *ServiceCatalogProvisioningDetails `type:"structure" required:"true"` @@ -66744,7 +66746,7 @@ func (s *MetricsSource) SetS3Uri(v string) *MetricsSource { // artifacts. // // Model artifacts are the output that results from training a model, and typically -// consist of trained parameters, a model defintion that describes how to compute +// consist of trained parameters, a model definition that describes how to compute // inferences, and other metadata. type ModelArtifacts struct { _ struct{} `type:"structure"` @@ -77516,9 +77518,7 @@ type ServiceCatalogProvisioningDetails struct { ProductId *string `min:"1" type:"string" required:"true"` // The ID of the provisioning artifact. - // - // ProvisioningArtifactId is a required field - ProvisioningArtifactId *string `min:"1" type:"string" required:"true"` + ProvisioningArtifactId *string `min:"1" type:"string"` // A list of key value pairs that you specify when you provision a product. ProvisioningParameters []*ProvisioningParameter `type:"list"` @@ -77554,9 +77554,6 @@ func (s *ServiceCatalogProvisioningDetails) Validate() error { if s.ProductId != nil && len(*s.ProductId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProductId", 1)) } - if s.ProvisioningArtifactId == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) - } if s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProvisioningArtifactId", 1)) }